From: Rebecca N. Palmer Date: Wed, 26 Aug 2020 21:34:50 +0000 (+0100) Subject: pandas (1.0.5+dfsg-3) unstable; urgency=medium X-Git-Tag: archive/raspbian/1.0.5+dfsg-3+rpi1^2~33 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=11cae5b000c9f74918b5b5335be0735aa2dea0d6;p=pandas.git pandas (1.0.5+dfsg-3) unstable; urgency=medium * Remove pytest-asyncio test-depends. * Remove numba test-depends on non-x86: at least s390x crashes. [dgit import unpatched pandas 1.0.5+dfsg-3] --- 11cae5b000c9f74918b5b5335be0735aa2dea0d6 diff --cc debian/README.source index 00000000,00000000..d2dc062b new file mode 100644 --- /dev/null +++ b/debian/README.source @@@ -1,0 -1,0 +1,8 @@@ ++pandas for Debian ++----------------- ++ ++For flexibility and easier interaction with upstream, packaging VCS is ++done on top of upstream's GIT hosted on github: ++git://github.com/wesm/pandas.git ++ ++ -- Yaroslav Halchenko , Tue, 13 Sep 2011 12:25:02 -0400 diff --cc debian/changelog index 00000000,00000000..a892f6cd new file mode 100644 --- /dev/null +++ b/debian/changelog @@@ -1,0 -1,0 +1,1187 @@@ ++pandas (1.0.5+dfsg-3) unstable; urgency=medium ++ ++ * Remove pytest-asyncio test-depends. ++ * Remove numba test-depends on non-x86: at least s390x crashes. ++ ++ -- Rebecca N. Palmer Wed, 26 Aug 2020 22:34:50 +0100 ++ ++pandas (1.0.5+dfsg-2) unstable; urgency=medium ++ ++ * Fix missing import and update numba submodule name in patches. ++ * Disable asyncio tests (workaround for #969050). ++ * Warn that numba may give wrong answers on non-x86, ++ and remove test-depends on mipsel. ++ * Skip a crashing test on s390x. ++ ++ -- Rebecca N. Palmer Wed, 26 Aug 2020 20:19:16 +0100 ++ ++pandas (1.0.5+dfsg-1) unstable; urgency=medium ++ ++ * Upstream bugfix release. Refresh patches, contributors_list. ++ * Fix invalid test xfails. ++ * Only Recommend numba on amd64, to reduce the risk of bugs. ++ * Don't test-depend on numba on ppc64el (where it crashes, #863511?) ++ or on ports architectures (where it mostly isn't available). ++ * Remove no longer needed test xfails/skips. ++ * Upload to unstable. (Closes: #950430) ++ ++ -- Rebecca N. Palmer Tue, 25 Aug 2020 20:07:50 +0100 ++ ++pandas (0.25.3+dfsg2-5) unstable; urgency=medium ++ ++ * Tests: ignore rounding difference on i386. (Closes: #968208) ++ ++ -- Rebecca N. Palmer Sun, 16 Aug 2020 20:09:14 +0100 ++ ++pandas (0.25.3+dfsg2-4) unstable; urgency=medium ++ ++ * Be compatible with matplotlib 3.3. (Closes: #966393) ++ * Docs: fix broken remote->local Javascript replacement. ++ ++ -- Rebecca N. Palmer Sun, 09 Aug 2020 22:11:25 +0100 ++ ++pandas (0.25.3+dfsg2-3) unstable; urgency=medium ++ ++ * Nested DataFrames may raise ValueError with numpy 1.19 ++ (upstream bug 32289). Clarify error message and xfail tests. ++ * Stop using a no-longer-existing numpy constant. ++ * Tests: ignore deprecations/rewordings and avoid setup exception ++ with numpy 1.19. (Together, the above Closes: #963817) ++ * Bump debhelper compat to 13. ++ * Fix HDFStore.flush (part of #877419) on s390x. ++ * Add NEWS.html.gz for Standards-Version 4.5.0. ++ (Choosing not to also add NEWS.gz as it would be large.) ++ * Tests: accept Hurd's errno and lack of multiprocessing. ++ * Docs: replace embedded Javascript copies with links. ++ ++ -- Rebecca N. Palmer Sun, 28 Jun 2020 21:47:22 +0100 ++ ++pandas (1.0.4+dfsg-1) experimental; urgency=medium ++ ++ * Upstream bugfix release. (Closes: #962335) ++ * Refresh patches, update contributors_list. ++ * Fix broken tests. 
++ * Tests: allow numba to raise an error on 32 bit systems. ++ * Don't test-depend on numba on armel (where it crashes, ++ possibly #863508) or ppc64/riscv64 (where it isn't available). ++ * Xfail some more HDF5 tests on big-endian architectures. ++ ++ -- Rebecca N. Palmer Tue, 09 Jun 2020 22:19:23 +0100 ++ ++pandas (0.25.3+dfsg2-2) unstable; urgency=medium ++ ++ * Tests: don't fail on jedi deprecation warnings. ++ ++ -- Rebecca N. Palmer Thu, 07 May 2020 11:57:06 +0100 ++ ++pandas (1.0.3+dfsg2-1) experimental; urgency=medium ++ ++ * Upstream bugfix release, update contributors_list. ++ * Merge packaging from unstable, ++ but omit no longer needed clipboard warn/xfail. ++ * Only show the NaN -> datetime warning from float dtypes ++ (to avoid an exception while trying to check). ++ * Recommend numba, as we now have a recent enough version. ++ * Re-add dask test-dependency. ++ * Clarify non-x86 warnings, remove no longer needed xfails / ignores. ++ * Clean up whitespace and patch names/descriptions. ++ * Remove patches no longer needed. ++ * Network tests: use more stable URLs. ++ ++ -- Rebecca N. Palmer Wed, 06 May 2020 17:07:44 +0100 ++ ++pandas (0.25.3+dfsg2-1) unstable; urgency=medium ++ ++ * Remove inconveniently licensed (CC-BY-SA) snippets. ++ * Fix (harmless) SyntaxWarning on install. (Closes: #956021) ++ * Fix NaT sort order and test failures with numpy 1.18. ++ (Closes: #958531) ++ ++ -- Rebecca N. Palmer Wed, 06 May 2020 12:18:23 +0100 ++ ++pandas (0.25.3+dfsg-9) unstable; urgency=medium ++ ++ * Don't raise on import without matplotlib installed. Add test of this. ++ ++ -- Rebecca N. Palmer Fri, 03 Apr 2020 21:56:02 +0100 ++ ++pandas (0.25.3+dfsg-8) unstable; urgency=medium ++ ++ * Tests: don't fail on harmless changes in dependencies. (Closes: #954647) ++ ++ -- Rebecca N. Palmer Thu, 02 Apr 2020 18:53:32 +0100 ++ ++pandas (0.25.3+dfsg-7) unstable; urgency=medium ++ ++ * Fix another test failure due to our warnings. ++ * Skip rather than xfail crashing tests. ++ ++ -- Rebecca N. Palmer Wed, 26 Feb 2020 18:45:58 +0000 ++ ++pandas (0.25.3+dfsg-6) unstable; urgency=medium ++ ++ * Don't fail tests on our own warnings. ++ * Xfail some more HDF tests on non-x86 architectures. ++ * Warn that clipboard I/O is broken on big-endian architectures ++ and xfail test. ++ * Use pytest-forked to isolate (already xfailed) crashing test. ++ * Xfail tests that use no-longer-existing URLs. ++ ++ -- Rebecca N. Palmer Wed, 26 Feb 2020 07:40:25 +0000 ++ ++pandas (0.25.3+dfsg-5) unstable; urgency=medium ++ ++ * Backport packaging from experimental: ++ - Remove unnecessary test skips, and reorganize remaining ones. ++ - Use xfails instead of skips. ++ - Add warnings for the known non-x86 breakages ++ (NaN -> datetime #877754, HDF and Stata I/O #877419). ++ - Tell I/O tests where to find the source tree's test data ++ instead of skipping them. ++ - Stop using deprecated envvar/tag names. ++ - Use https for links where available. ++ ++ -- Rebecca N. Palmer Mon, 24 Feb 2020 22:38:26 +0000 ++ ++pandas (1.0.1+dfsg-1) experimental; urgency=medium ++ ++ * Upstream bugfix release. ++ * Refresh patches. ++ * Update and sort d/copyright, update contributors_list. ++ * Re-enable checking the test suite. ++ * Declare transition Breaks (see #950430). ++ * Add jinja2 recommends/test-depends. ++ * Fix test_to_numpy failure on big-endian systems. ++ * Register documentation in doc-base. (Closes: #879226) ++ * Remove no longer needed test xfails/skips, ++ and reorganize the remaining ones. 
++ * Tell I/O tests where to find the source tree's test data ++ instead of skipping them. ++ * Enable multiarch. ++ * Temporarily drop dask test-dependency to avoid uninstallability. ++ * Add warnings for the known non-x86 breakages ++ (NaN -> datetime #877754, HDF and Stata I/O #877419). ++ ++ -- Rebecca N. Palmer Sun, 23 Feb 2020 17:13:08 +0000 ++ ++pandas (1.0.0+dfsg-1) experimental; urgency=medium ++ ++ * New upstream release. ++ * Upload to experimental, as this is an API break (see #950430). ++ * Drop patches applied upstream, refresh others. ++ * Update and improve d/copyright, update contributors_list. ++ * Xfail a test that fails in the C locale. ++ * Update and organize depends/recommends. ++ * Docs: use a sphinx theme we have, fix spelling, ++ link to rather than embed remote resource, ++ use https links where available. ++ * Stop using deprecated envvar/tag names. ++ * Xfail rather than skip previously broken tests, ++ and put the condition in the patch not d/rules or d/tests. ++ * Remove no longer used patch-stamp. ++ * Temporarily ignore the test suite to get a first build. ++ ++ -- Rebecca N. Palmer Sun, 02 Feb 2020 21:04:36 +0000 ++ ++pandas (0.25.3+dfsg-4) unstable; urgency=medium ++ ++ * No-change upload to unstable. (Closes: #937236, #931557) ++ ++ -- Rebecca N. Palmer Sun, 10 Nov 2019 16:35:41 +0000 ++ ++pandas (0.25.3+dfsg-3) experimental; urgency=medium ++ ++ * Fix autopkgtest. ++ ++ -- Rebecca N. Palmer Sat, 09 Nov 2019 10:29:47 +0000 ++ ++pandas (0.25.3+dfsg-2) experimental; urgency=medium ++ ++ * Split up the test suite to fit in memory on mipsel, ++ and stop ignoring it there. (Closes: #943732) ++ * Reproducibility: use correct path for stripping docs. ++ * Declare transition Breaks (see #931557). ++ * Tests: ignore warning from Python 3.8. ++ * Update d/copyright (some files have moved). ++ * Use local requirejs. ++ ++ -- Rebecca N. Palmer Fri, 08 Nov 2019 07:56:16 +0000 ++ ++pandas (0.25.3+dfsg-1) experimental; urgency=medium ++ ++ * Upstream bugfix release. ++ * Drop patch no longer needed. ++ * Update autopkgtest dependencies, drop unused link. ++ * Better document test skips, remove unnecessary ones. ++ * Reproducibility: strip timestamps and build paths, ++ use fixed random seeds for building documentation. ++ * Ignore test suite on mipsel. ++ ++ -- Rebecca N. Palmer Sat, 02 Nov 2019 22:26:31 +0000 ++ ++pandas (0.25.2+dfsg-2) experimental; urgency=medium ++ ++ * Correct path for contributors list, and don't fail when ++ not building the -doc package. ++ * Try again to fix test failure due to deb_nonversioneer_version. ++ * Skip some failing tests on non-Intel (see #943732), ++ require other tests to pass. ++ * Fix another typo. ++ ++ -- Rebecca N. Palmer Mon, 28 Oct 2019 22:06:10 +0000 ++ ++pandas (0.25.2+dfsg-1) experimental; urgency=medium ++ ++ [ Graham Inggs ] ++ * Skip python2 test_register_by_default on s390x ++ * Fix python2 test failures in certain locales ++ ++ [ Yaroslav Halchenko ] ++ * Recent upstream release ++ * Updated patches ++ * Adjusted for the gone ci/print_versions ++ * d/control ++ - added python{,3}-hypothesis to b-depends ++ ++ [ Rebecca N. Palmer ] ++ * New upstream release. ++ * Upload to experimental, as this is an API break (see #931557). ++ * Drop patches fixed upstream, refresh others. ++ * Remove Python 2 packages (see #937236). ++ * Use Python 3 in shebangs and subprocess calls. ++ * Re-enable building on Python 3.8. ++ * Use the new location of print_versions. 
++ * Skip feather tests and remove build-dependency: ++ they now need pyarrow.feather, which isn't in Debian. ++ * Don't fail tests for our versioneer removal ++ or a differently worded error message. ++ * Add/update minimum dependency versions. ++ * Add numpydoc, nbconvert and pytest-xdist build-depends. ++ * Update d/copyright. ++ * Pre-generate a contributor list to avoid needing the git log ++ at build time (when it won't exist). ++ * Allow tests to fail for now. ++ ++ -- Rebecca N. Palmer Mon, 28 Oct 2019 07:53:21 +0000 ++ ++pandas (0.23.3+dfsg-8) unstable; urgency=medium ++ ++ * Examples dependencies: re-add statsmodels and xarray; ++ also add rpy2 and feather. ++ * Use packaged intersphinx indexes. (Closes: #876417) ++ * Use https for intersphinx links. ++ * Remove cythonized-files*. (They are regenerated on each build.) ++ * Remove test xfail, as statsmodels has now been fixed. ++ * Set Rules-Requires-Root: no. ++ * Make documentation Suggest the Python 3 version. ++ * Suggest statsmodels. ++ * Only use Python 3 sphinx, and mark it -Indep/nodoc. ++ * Bump debhelper compat to 12 and use debhelper-compat and pybuild. ++ * Remove pycompat and X-Python*-Version. ++ * Add missing d/copyright item. ++ * Remove obsolete TODOs. ++ * Clarify descriptions. ++ * Stop referring to examples that no longer exist. ++ * Fix typos. ++ * Remove old (no longer used) EXCLUDE_TESTS*. ++ * Deduplicate documentation files. ++ * Use Python 3 shebangs, and fix broken shebang. ++ * Add python3-ipykernel, -ipywidgets, -seaborn to ++ Build-Depends-Indep. ++ * Disable dh_auto_test: it fails, and we run the tests elsewhere. ++ * Mark test dependencies nocheck/nodoc. ++ * Remove old minimum versions / alternative dependencies. ++ * Build-depend on dh-python. ++ * Don't build on python3.8, as it will fail tests (see #931557). ++ ++ -- Rebecca N. Palmer Sun, 27 Oct 2019 11:38:37 +0000 ++ ++pandas (0.23.3+dfsg-7) unstable; urgency=medium ++ ++ * Revert test patch and use an xfail instead. ++ * Temporarily drop statsmodels+xarray Build-Depends, as they are ++ uninstallable until this is built. ++ * Add python3-xarray to autopkgtest Depends. ++ * Drop Python 2 autopkgtest (but keep build-time test). ++ * Remove duplicate Recommends. ++ ++ -- Rebecca N. Palmer Fri, 20 Sep 2019 08:01:37 +0100 ++ ++pandas (0.23.3+dfsg-6) unstable; urgency=medium ++ ++ * Team upload ++ * Avoid FTBFS with statsmodels 0.9.0 ++ * Add python3-statsmodels to autopkgtest Depends ++ ++ -- Graham Inggs Wed, 18 Sep 2019 13:46:01 +0000 ++ ++pandas (0.23.3+dfsg-5) unstable; urgency=medium ++ ++ * Team upload ++ * Add locales-all to Build-Depends and autopkgtest Depends in order to ++ consistently test in all available locales ++ * Add crh_UA to skip_noencoding_locales.patch ++ * Fix wrong debian/source/options exclude, thanks Steve Langasek ++ ++ -- Graham Inggs Wed, 18 Sep 2019 05:57:44 +0000 ++ ++pandas (0.23.3+dfsg-4) unstable; urgency=medium ++ ++ * Add self to Uploaders. ++ * Recommend .xls format support also in Python 3. (Closes: #880125) ++ * Tests: don't call fixtures, as this is an error in pytest 4+. ++ * Don't test datetime in locales with no encoding. ++ (These are broken by a Python stdlib bug.) ++ ++ -- Rebecca N. Palmer Sat, 14 Sep 2019 16:37:43 +0100 ++ ++pandas (0.23.3+dfsg-3) unstable; urgency=medium ++ ++ * Team upload. ++ * Make np.array @ Series act the right way round. (Closes: #923708) ++ * Replace #918206 fix with a fix that doesn't change the return type ++ and inplace-ness of np.array += DataFrame.
(Closes: #923707) ++ * Fix missing page in documentation. ++ ++ -- Rebecca N. Palmer Wed, 06 Mar 2019 22:19:34 +0000 ++ ++pandas (0.23.3+dfsg-2) unstable; urgency=medium ++ ++ * Team upload. ++ * Don't fail the build on +dfsg versions. ++ * Fix another d/copyright issue. ++ * Add d/upstream/metadata. ++ ++ -- Rebecca N. Palmer Sat, 02 Mar 2019 14:57:12 +0000 ++ ++pandas (0.23.3+dfsg-1) unstable; urgency=medium ++ ++ * Team upload. ++ * Fix DataFrame @ np.array matrix multiplication. (Closes: #918206) ++ * Fix documentation build (Sphinx now defaults to Python 3). ++ (Closes: #804552, LP: #1803018) ++ * Add documentation examples dependencies. ++ * Update d/copyright. ++ * Remove unlicensed files. ++ ++ -- Rebecca N. Palmer Fri, 01 Mar 2019 23:02:18 +0000 ++ ++pandas (0.23.3-1) unstable; urgency=medium ++ ++ * New upstream release ++ * debian/patches ++ - many upstreamed patches are removed and others refreshed ++ ++ -- Yaroslav Halchenko Sat, 28 Jul 2018 00:39:32 -0400 ++ ++pandas (0.22.0-8) unstable; urgency=medium ++ ++ * Team Upload. ++ * patches: ++ + Add patch: deb_dont_call_py2_in_py3_test.patch ++ During python3 unit test, command 'python' is called by one of ++ the tests. When there is no python2 installation, tests such as ++ autopkgtest would fail. ++ * Put the conditionally applied patch to series' comment to avoid ++ lintian W: patch-file-present-but-not-mentioned-in-series. ++ * Trying to fix the autopkgtest: ++ + Leave a comment about the way to run unittest in the test control file. ++ + Synchronize B-D and autopkgtest depends. ++ + Allow output to stderr during test. ++ * Switch from nosetest to pytest. ++ * Synchronize pytest argument for rules and autopkgtest. ++ - Replace tests/unittest with a symlink pointing to tests/unittest3. ++ That script is smart enough to tell py2 from py3, so we won't ++ need to write the same thing twice. ++ - Filter out intel tests on non-x86 architectures. ++ - Only enable "slow" tests on (Debian + x86) tester. "slow" tests may ++ consume too much memory and cause a memory error or trigger the OOM killer. ++ * control: ++ + Add missing python3 dependencies and sort the B-D list. ++ * Point Vcs-* fields to Salsa. ++ * Update Homepage to https://pandas.pydata.org/ . ++ * rules: ++ * Reverse the architecture filtering logic. ++ * Disable "slow" tests during build for non-x86 architectures. ++ This may significantly reduce the build time on those weak architectures. ++ * Don't specify the pytest marker expression twice. ++ The first expression will be overridden. ++ * Fix hardening flags. ++ - Clean up the mess of unused nosetest exclusion expressions. ++ * Update lintian overrides. ++ + Override source-is-missing error, which is a false-positive triggered ++ by insane-line-length-in-source-file. ++ + Override insane-line-length-in-source-file because we have nothing ++ to do with lengthy lines in html. ++ * TODO: Point out that the unittest speed can be boosted with pytest-xdist. ++ ++ -- Mo Zhou Sun, 17 Jun 2018 16:01:16 +0000 ++ ++pandas (0.22.0-7) unstable; urgency=medium ++ ++ * Team Upload. ++ ++ [ Mo Zhou ] ++ * Remove patch: deb_fix_test_failure_test_basic_indexing, which is ++ unneeded for pandas >= 0.21 . (Closes: #900061) ++ ++ [ Graham Inggs ] ++ * Add riscv64 to the list of "not intel" architectures ++ * Update mark_tests_working_on_intel_armhf.patch ++ ++ -- Graham Inggs Tue, 29 May 2018 13:50:59 +0000 ++ ++pandas (0.22.0-6) unstable; urgency=medium ++ ++ * Team upload ++ * Fix FTBFS with Sphinx 1.7, thanks Dmitry Shachnev!
++ ++ -- Graham Inggs Tue, 24 Apr 2018 19:09:20 +0000 ++ ++pandas (0.22.0-5) unstable; urgency=medium ++ ++ * Team upload ++ * Add compatibility with Matplotlib 2.2 (Closes: #896673) ++ ++ -- Graham Inggs Mon, 23 Apr 2018 13:56:12 +0000 ++ ++pandas (0.22.0-4) unstable; urgency=medium ++ ++ * Team upload ++ * Fix more tests expecting little-endian results ++ * Fix heap corruption in read_csv on 32-bit, big-endian architectures ++ (Closes: #895890) ++ ++ -- Graham Inggs Sun, 22 Apr 2018 21:48:27 +0000 ++ ++pandas (0.22.0-3) unstable; urgency=medium ++ ++ * Team upload ++ * Refresh and re-enable mark_tests_working_on_intel.patch ++ * Fix test__get_dtype tests expecting little-endian results ++ ++ -- Graham Inggs Thu, 12 Apr 2018 11:04:21 +0000 ++ ++pandas (0.22.0-2) unstable; urgency=medium ++ ++ * debian/patches ++ - as upstream moved over to pytest from nose, no more nose imports were ++ in the code. Just adjusted patches to import nose where needed ++ * debian/rules ++ - specify LC_ALL=C locale to avoid crash while building docs ++ - add the 0001-TST-pytest-deprecation-warnings-GH17197-17253-reversed.patch ++ to the series if building on a system with an old pytest ++ ++ -- Yaroslav Halchenko Wed, 21 Feb 2018 23:44:58 -0500 ++ ++pandas (0.22.0-1) unstable; urgency=medium ++ ++ * Upstream release ++ * debian/patches ++ - refreshed many ++ - updated some ++ - added ++ - up_moto_optional to skip tests requiring moto (#777089) ++ - deb_skip_difffailingtests to skip two failing tests ++ (see https://github.com/pandas-dev/pandas/issues/19774) ++ - up_xlwt_optional to skip a test requiring xlwt ++ - deb_ndsphinx_optional to make nbsphinx optional. ++ Make nbsphinx not required in build-depends on systems with ++ older python-sphinx ++ - mark_tests_failing_on_386.patch ++ see https://github.com/pandas-dev/pandas/issues/19814 ++ - removed adopted upstream: ++ - dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch ++ - up_tst_np_argsort_comparison2 ++ - disabled for now: ++ - mark_tests_working_on_intel.patch ++ - up_tst_dont_assert_that_a_bug_exists_in_numpy ++ ++ -- Yaroslav Halchenko Wed, 21 Feb 2018 10:30:06 -0500 ++ ++pandas (0.20.3-11) unstable; urgency=medium ++ ++ * Team upload. ++ * Cherry-pick upstream commit 5f2b96bb637f6ddeec169c5ef8ad20013a03c853 ++ to workaround a numpy bug. (Closes: #884294) ++ + patches/up_tst_dont_assert_that_a_bug_exists_in_numpy ++ * Cherry-pick upstream commits to fix test failure caused by test_argsort(). ++ + patches/up_tst_np_argsort_comparison2 ++ * Workaround test failure of test_basic_indexing() in file ++ pandas/tests/series/test_indexing.py . ++ + patches/deb_fix_test_failure_test_basic_indexing ++ ++ -- Mo Zhou Sat, 20 Jan 2018 09:00:31 +0000 ++ ++pandas (0.20.3-10) unstable; urgency=medium ++ ++ * Team upload. ++ * Exclude more tests failing on mips, armhf and powerpc ++ ++ -- Andreas Tille Tue, 24 Oct 2017 21:26:02 +0200 ++ ++pandas (0.20.3-9) unstable; urgency=medium ++ ++ * Team upload. ++ * Add missing "import pytest" to two patched tests ++ * Secure URI in watch file ++ ++ -- Andreas Tille Tue, 24 Oct 2017 08:18:54 +0200 ++ ++pandas (0.20.3-8) unstable; urgency=medium ++ ++ * Team upload. ++ * Exclude one more test and de-activate non-working ignore of test errors ++ ++ -- Andreas Tille Mon, 23 Oct 2017 21:32:24 +0200 ++ ++pandas (0.20.3-7) unstable; urgency=medium ++ ++ * Team upload. 
++ * debhelper 9 ++ * Use Debian packaged mathjax ++ * Do not Recommends python3-six since it is mentioned in Depends ++ * Remove redundant/outdated XS-Testsuite: autopkgtest ++ * Exclude one more test and de-activate non-working ignore of test errors ++ ++ -- Andreas Tille Mon, 23 Oct 2017 17:33:55 +0200 ++ ++pandas (0.20.3-6) unstable; urgency=medium ++ ++ * Team upload. ++ * Ignore test errors on some architectures ++ (Concerns bug #877419) ++ * Remove __pycache__ remnants from testing ++ * Standards-Version: 4.1.1 ++ * DEP3 for Google Analytics patch ++ * Complete Google Analytics patch ++ ++ -- Andreas Tille Mon, 23 Oct 2017 09:05:27 +0200 ++ ++pandas (0.20.3-5) unstable; urgency=medium ++ ++ * Make sure remnants of nose tests will not fail. That's a pretty stupid ++ patch since the tests are not using nose any more, only some remaining ++ exceptions. Hope it will work anyway. ++ (Concerns bug #877419) ++ ++ -- Andreas Tille Mon, 16 Oct 2017 21:57:45 +0200 ++ ++pandas (0.20.3-4) unstable; urgency=medium ++ ++ * Mark those tests @pytest.mark.intel that pass only on Intel architectures ++ * d/rules: try to exclude tests that were marked "intel" ++ (Concerns bug #877419) ++ ++ -- Andreas Tille Sat, 14 Oct 2017 19:49:01 +0200 ++ ++pandas (0.20.3-3) unstable; urgency=medium ++ ++ * Team upload. ++ * Moved packaging from pkg-exppsy to Debian Science ++ * Exclude certain tests on certain architectures ++ (Concerns bug #877419) ++ ++ -- Andreas Tille Fri, 13 Oct 2017 20:52:53 +0200 ++ ++pandas (0.20.3-2) unstable; urgency=medium ++ ++ * debian/control ++ - boosted policy to 4.0.0 (I think we should be ok) ++ - drop statsmodels from build-depends to altogether avoid the circular ++ build-depends (Closes: #875805) ++ * Diane Trout: ++ - Add dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch (Closes: #875807) ++ ++ -- Yaroslav Halchenko Thu, 21 Sep 2017 16:11:29 -0400 ++ ++pandas (0.20.3-1) unstable; urgency=medium ++ ++ * Fresh upstream release ++ * debian/patches ++ - updated some, removed changeset*, and disabled possibly fixed upstream ++ ones ++ * debian/{control,rules} ++ - upstream switched to use pytest instead of nose ++ - enabled back all the tests for now ++ - added python-nbsphinx for b-depends, needed for docs ++ * debian/*.install ++ - no .so at the first level of subdirectories, now present on the third ++ ++ -- Yaroslav Halchenko Mon, 10 Jul 2017 20:00:59 -0400 ++ ++pandas (0.19.2-5.1) unstable; urgency=medium ++ ++ * Non-maintainer upload. ++ * Apply patch by Rebecca N.
Palmer ++ Closes: #858260 ++ ++ -- Andreas Tille Sun, 02 Apr 2017 07:06:36 +0200 ++ ++pandas (0.19.2-5) unstable; urgency=medium ++ ++ * And one more test to skip on non-amd64 -- test_round_trip_valid_encodings ++ ++ -- Yaroslav Halchenko Thu, 12 Jan 2017 13:10:11 -0500 ++ ++pandas (0.19.2-4) unstable; urgency=medium ++ ++ * Exclude few more "plotting" tests on non-amd64 which cause FTBFS ++ on s390 ++ ++ -- Yaroslav Halchenko Thu, 12 Jan 2017 11:43:13 -0500 ++ ++pandas (0.19.2-3) unstable; urgency=medium ++ ++ * Brought back changeset_0699c89882133a41c250abdac02796fec84512e8.diff ++ which should resolve tests failures on BE platforms (wasn't yet ++ upstreamed within 0.19.x releases) ++ ++ -- Yaroslav Halchenko Thu, 12 Jan 2017 09:44:52 -0500 ++ ++pandas (0.19.2-2) unstable; urgency=medium ++ ++ * Exclude a number of tests while running on non-amd64 platforms ++ due to bugs in numpy/pandas ++ ++ -- Yaroslav Halchenko Wed, 11 Jan 2017 12:13:05 -0500 ++ ++pandas (0.19.2-1) unstable; urgency=medium ++ ++ * Fresh upstream minor release -- supposed to be bugfix but interacts ++ with current beta (1:1.12.0~b1-1) numpy leading to various failed tests ++ * debian/patches ++ - changeset_ae6a0a51cf41223394b7ef1038c210045d486cc8.diff ++ to guarantee the same Series dtype as of cut regardless of architecture ++ - up_buggy_overflows ++ workaround for inconsistent overflows while doing pow operation on big ++ ints ++ * debian/rules ++ - exclude more tests which are due to known issues in numpy beta and thus ++ not to be addressed directly in pandas ++ ++ -- Yaroslav Halchenko Wed, 04 Jan 2017 10:19:52 -0500 ++ ++pandas (0.19.1+git174-g81a2f79-1) experimental; urgency=medium ++ ++ * New upstream snapshot from v0.19.0-174-g81a2f79 ++ - lots of bugfixes since 0.19.1, so decided to test snapshot ++ ++ -- Yaroslav Halchenko Sat, 10 Dec 2016 22:43:19 -0500 ++ ++pandas (0.19.1-3) unstable; urgency=medium ++ ++ * Require cython >= 0.23 or otherwise use pre-cythoned sources ++ (should resolve https://github.com/pandas-dev/pandas/issues/14699 ++ on jessie) ++ * debian/control ++ - Build-Conflicts with python-tables 3.3.0-4 since that one leads to FTBFS ++ - boosted policy to 3.9.8 ++ * debian/rules ++ - Exclude few more tests which fail on big endian and other platforms ++ test_(msgpack|read_dta18) ++ * debian/patches ++ - changeset_0699c89882133a41c250abdac02796fec84512e8.diff ++ to compare in the tests against native endianness ++ ++ -- Yaroslav Halchenko Fri, 09 Dec 2016 15:49:50 -0500 ++ ++pandas (0.19.1-2) unstable; urgency=medium ++ ++ * debian/control ++ - Moved statsmodels build-depend (optional) under build-depends-indep ++ to break circular dependency. 
Thanks Stuart Prescott for the analysis ++ * debian/patches/ ++ - changeset_1309346c08945cd4764a549ec63cf51089634a45.diff ++ to not mask problem reading json leading to use of undefined variable ++ ++ -- Yaroslav Halchenko Sun, 27 Nov 2016 21:49:40 -0500 ++ ++pandas (0.19.1-1) unstable; urgency=medium ++ ++ * Fresh upstream release ++ ++ -- Yaroslav Halchenko Fri, 18 Nov 2016 12:19:54 -0500 ++ ++pandas (0.19.0+git14-ga40e185-1) unstable; urgency=medium ++ ++ * New upstream post-release (includes some bugfixes) snapshot ++ * debian/patches ++ - dropped changeset_ and up_ patches adopted upstream, refreshed the rest ++ * debian/rules,patches ++ - save debian-based version into __version.py, so doesn't conflict with ++ upstream tests of public API ++ - exclude for now test_expressions on python3 ++ (see https://github.com/pydata/pandas/issues/14269) ++ ++ -- Yaroslav Halchenko Thu, 13 Oct 2016 10:26:18 -0400 ++ ++pandas (0.18.1-1) unstable; urgency=medium ++ ++ * Fresh upstream release ++ * debian/patches/ ++ - changeset_46af7cf0f8e0477f6cc7454aa786a573228f0ac3.diff ++ to allow also AttributeError exception being thrown in the tests ++ (Closes: #827938) ++ - debian/patches/deb_skip_test_precision_i386 ++ removed (upstreamed) ++ ++ -- Yaroslav Halchenko Wed, 13 Jul 2016 10:42:00 -0400 ++ ++pandas (0.18.0+git114-g6c692ae-1) unstable; urgency=medium ++ ++ * debian/control ++ - added python{,3}-pkg-resources to direct Depends for the packages ++ (Closes: #821076) ++ ++ -- Yaroslav Halchenko Sun, 17 Apr 2016 20:49:25 -0400 ++ ++pandas (0.17.1-3) unstable; urgency=medium ++ ++ * debian/tests/unittest* ++ - set LC_ALL=C.UTF-8 for the tests run to prevent failure of test_set_locale ++ ++ -- Yaroslav Halchenko Tue, 08 Dec 2015 08:31:30 -0500 ++ ++pandas (0.17.1-2) unstable; urgency=medium ++ ++ * debian/control ++ - make -statsmodels and -tables optional build-depends on those platforms ++ where they are N/A atm. 
Added bdepends on python3-tables since available ++ ++ -- Yaroslav Halchenko Sun, 06 Dec 2015 12:58:26 -0500 ++ ++pandas (0.17.1-1) unstable; urgency=medium ++ ++ * Fresh upstream bugfix release ++ * debian/rules ++ - fixed deletion of moved away .so files ++ ++ -- Yaroslav Halchenko Fri, 27 Nov 2015 10:52:49 -0500 ++ ++pandas (0.17.0+git8-gcac4ad2-2) unstable; urgency=medium ++ ++ * Bug fix: install also msgpack/*.so extensions to -lib packages ++ ++ -- Yaroslav Halchenko Sat, 10 Oct 2015 13:52:54 -0400 ++ ++pandas (0.17.0+git8-gcac4ad2-1) unstable; urgency=medium ++ ++ * New upstream snapshot post release to pick up few bugfixes ++ - Started to trigger failures of test_constructor_compound_dtypes and ++ test_invalid_index_types -- disabled those for now, see ++ https://github.com/pydata/pandas/issues/11169 ++ * debian/rules ++ - Generate pandas/version.py if not present out of debian/changelog ++ upstream version information (versioneer wouldn't know since relies on ++ git) ++ ++ -- Yaroslav Halchenko Fri, 09 Oct 2015 21:35:23 -0400 ++ ++pandas (0.16.2+git65-g054821d-1) unstable; urgency=medium ++ ++ * Fresh upstream post-release snapshot (to pick up recent fixes etc) ++ (Closes: #787432) ++ * debian/{control,rules} ++ - build -doc package (Closes: #660900) ++ - add ipython (or alternative new ones from neurodebian) into ++ Build-Depends-Indep to build docs ++ - add python{,3}-{lxml,html5lib} to Build-Depends and Recommends ++ - use LC_ALL=C.UTF-8 while running tests ++ - exclude also test_set_locale since it fails ATM ++ see https://github.com/pydata/pandas/issues/10471 ++ ++ -- Yaroslav Halchenko Tue, 30 Jun 2015 17:26:54 -0400 ++ ++pandas (0.16.0~rc1-1) experimental; urgency=medium ++ ++ * New upstream release candidate ++ ++ -- Yaroslav Halchenko Fri, 13 Mar 2015 14:21:39 -0400 ++ ++pandas (0.15.2-1) unstable; urgency=medium ++ ++ * Fresh upstream release ++ ++ -- Yaroslav Halchenko Thu, 11 Dec 2014 09:51:57 -0500 ++ ++pandas (0.15.1+git125-ge463818-1) unstable; urgency=medium ++ ++ * New upstream snapshot from v0.15.1-125-ge463818. ++ * Upload to unstable during freeze since previous one in sid didn't make it ++ to jessie anyways ++ * debian/control ++ - remove versioning demand for cython (it would use pre-cythonized code on ++ older ones and there is no longer need in sid/jessie to enforce version). 
++ As a consequence -- removed all dsc patches pointing to ++ nocython3-dsc-patch, since no longer needed ++ ++ -- Yaroslav Halchenko Sun, 30 Nov 2014 21:09:36 -0500 ++ ++pandas (0.15.0-2) unstable; urgency=medium ++ ++ * debian/control ++ - specify minimal numpy to be 1.7 ++ * debian/patches ++ - deb_skip_stata_on_bigendians skip test_stata again on BE platforms ++ - deb_skip_test_precision_i386 skip test_precision_conversion on 32bit ++ ++ -- Yaroslav Halchenko Thu, 30 Oct 2014 23:09:13 -0400 ++ ++pandas (0.15.0-1) unstable; urgency=medium ++ ++ * New upstream release ++ * debian/control ++ - restrict statsmodels and matplotlib from being required on the ports ++ which do not have them ++ ++ -- Yaroslav Halchenko Sun, 26 Oct 2014 11:30:23 -0400 ++ ++pandas (0.14.1-2) unstable; urgency=medium ++ ++ * debian/patches/changeset_314012d.diff ++ - Fix converter test for MPL1.4 (Closes: #763709) ++ ++ -- Yaroslav Halchenko Mon, 06 Oct 2014 11:53:42 -0400 ++ ++pandas (0.14.1-1) unstable; urgency=medium ++ ++ * New upstream release ++ ++ -- Yaroslav Halchenko Thu, 10 Jul 2014 23:38:49 -0400 ++ ++pandas (0.14.0+git393-g959e3e4-1) UNRELEASED; urgency=medium ++ ++ * New upstream snapshot from v0.14.0-345-g8cd3dd6 ++ * debian/rules ++ - disable running disabled tests to prevent clipboard test failures ++ under kfreebsd kernels ++ ++ -- Yaroslav Halchenko Mon, 07 Jul 2014 12:29:50 -0400 ++ ++pandas (0.14.0+git213-g741b2fa-1) experimental; urgency=medium ++ ++ * New upstream snapshot from v0.14.0-213-g741b2fa. ++ ++ -- Yaroslav Halchenko Thu, 19 Jun 2014 10:30:42 -0400 ++ ++pandas (0.14.0+git17-g3849d5d-1) unstable; urgency=medium ++ ++ * New upstream snapshot from v0.14.0-17-g3849d5d -- has resolved a number ++ of bugs sneaked into 0.14.0 release, and caused FTBFS on some platforms ++ and backports ++ ++ -- Yaroslav Halchenko Sun, 01 Jun 2014 00:54:34 -0400 ++ ++pandas (0.14.0-1) unstable; urgency=medium ++ ++ * New upstream release ++ ++ -- Yaroslav Halchenko Fri, 30 May 2014 08:45:35 -0400 ++ ++pandas (0.14.0~rc1+git79-g1fa5dd4-1) experimental; urgency=medium ++ ++ * New upstream snapshot from v0.14.0rc1-73-g8793356 ++ * debian/patches: ++ - dropped CPed changeset_*s ++ - added deb_disable_googleanalytics ++ * debian/control: ++ - boosted policy compliance to 3.9.5 ++ ++ -- Yaroslav Halchenko Tue, 27 May 2014 16:00:00 -0400 ++ ++pandas (0.13.1-2) unstable; urgency=low ++ ++ * debian/patches/changeset_6d56e7300d66d3ba76684334bbb44b6cd0ea9f61.diff ++ to fix FTBFS of statsmodels due to failing tests (Closes: #735804) ++ ++ -- Yaroslav Halchenko Sat, 08 Feb 2014 12:46:42 -0500 ++ ++pandas (0.13.1-1) unstable; urgency=low ++ ++ * Fresh upstream release ++ * debian/patches ++ - deb_skip_test_pytables_failure to mitigate error while testing on ++ amd64 wheezy and ubuntu 12.04 ++ ++ -- Yaroslav Halchenko Tue, 04 Feb 2014 12:09:29 -0500 ++ ++pandas (0.13.0+git464-g15a8ff7-1) experimental; urgency=low ++ ++ * Fresh pre-release snapshot ++ * debian/patches ++ - removed all cherry-picked patches (should have been upstreamed) ++ ++ -- Yaroslav Halchenko Wed, 29 Jan 2014 21:27:45 -0500 ++ ++pandas (0.13.0-2) unstable; urgency=low ++ ++ * debian/patches ++ - 0001-BLD-fix-cythonized-msgpack-extension-in-setup.py-GH5.patch ++ to resolve issue with building C++ Cython extension using ++ pre-generated sources ++ - 0001-Add-division-future-import-everywhere.patch ++ 0002-remove-explicit-truediv-kwarg.patch ++ to resolve compatibility issues with elderly Numexpr ++ -
0001-BUG-Yahoo-finance-changed-ichart-url.-Fixed-here.patch ++ - deb_skip_sequencelike_on_armel to prevent FTBFS on armel due to failing ++ test: https://github.com/pydata/pandas/issues/4473 ++ ++ -- Yaroslav Halchenko Fri, 03 Jan 2014 23:13:48 -0500 ++ ++pandas (0.13.0-1) unstable; urgency=low ++ ++ * Fresh upstream release ++ - resolved compatibility with matplotlib 1.3 (Closes: #733848) ++ * debian/{control,rules} ++ - use xvfb (added to build-depends together with xauth, and xclip) ++ for tests ++ - define http*_proxy to prevent downloads ++ - install .md files not .rst for docs -- were renamed upstream ++ - include .cpp Cython generated files into debian/cythonized-files* ++ ++ -- Yaroslav Halchenko Wed, 01 Jan 2014 18:08:22 -0500 ++ ++pandas (0.12.0-2) unstable; urgency=low ++ ++ [ Dmitry Shachnev ] ++ * DEP-8 tests improvements: ++ - Use Xvfb for running tests. ++ - Increase verbosity using -v flag. ++ - Fix printing interpreter version in unittests3. ++ * Fix indentation in debian/control. ++ ++ [ Yaroslav Halchenko ] ++ * debian/control ++ - place python3-matplotlib ahead of elderly python-matplotlib without ++ python3 support since now we have python3-matplotlib in sid ++ * debian/copyright ++ - go through reported missing copyright/license statements (Closes: ++ #700564) Thanks Luca Falavigna for the report ++ * debian/rules,patches ++ - exclude test test_bar_log due to incompatibility with matplotlib 1.3.0 (test ++ adjusted upstream and would be re-enabled for the new release). ++ - debian/patches/changeset_952c5f0bc433622d21df20ed761ee4cb728370eb.diff ++ adds matplotlib 1.3.0 compatibility ++ ++ -- Yaroslav Halchenko Sat, 14 Sep 2013 20:02:58 -0400 ++ ++pandas (0.12.0-1) unstable; urgency=low ++ ++ * New upstream release: ++ - should address failed tests on 32bit platforms ++ * debian/patches ++ - neurodebian: allow to build for jessie with outdated cython ++ * debian/control ++ - build for Python2 >= 2.7 due to some (probably temporary) incompatibilities ++ in tests with 2.6 ++ ++ -- Yaroslav Halchenko Wed, 24 Jul 2013 23:29:03 -0400 ++ ++pandas (0.12.0~rc1+git127-gec8920a-1) experimental; urgency=low ++ ++ * New upstream snapshot from origin/master at v0.12.0rc1-127-gec8920a ++ - should address FTBFS due to failing tests on big endians ++ ++ -- Yaroslav Halchenko Sat, 20 Jul 2013 09:23:04 -0400 ++ ++pandas (0.12.0~rc1+git112-gb79996c-1) experimental; urgency=low ++ ++ * Fresh git snapshot of upstream candidate release. Experimental build ++ to verify functioning across the ports. ++ * debian/control ++ - dedented last "paragraph" to break it away from the 2nd one. ++ Thanks Beatrice Torracca for the detailed report (Closes: #712260) ++ - Depends on python-six now ++ * debian/{,tests/}control ++ - added python{,3}-bs4, python-html5lib to Build-Depends for more ++ thorough testing ++ ++ -- Yaroslav Halchenko Thu, 18 Jul 2013 13:15:19 -0400 ++ ++pandas (0.11.0-2) unstable; urgency=low ++ ++ [ Yaroslav Halchenko ] ++ * Upload to unstable -- this upstream release addressed Cython 0.19 ++ compatibility issue (Closes: #710608) ++ * Recommends numexpr ++ * Re-cythonized using Cython 0.19 ++ ++ [ Dmitry Shachnev ] ++ * debian/tests/unittests3: use nosetests3 instead of nosetests-3.x.
++ ++ -- Yaroslav Halchenko Mon, 03 Jun 2013 11:57:43 -0400 ++ ++pandas (0.11.0-1) experimental; urgency=low ++ ++ * New upstream release ++ ++ -- Yaroslav Halchenko Tue, 23 Apr 2013 22:40:15 -0400 ++ ++pandas (0.10.1-1) experimental; urgency=low ++ ++ * New upstream release ++ ++ -- Yaroslav Halchenko Tue, 22 Jan 2013 13:07:31 -0500 ++ ++pandas (0.10.0-1) experimental; urgency=low ++ ++ * New upstream release ++ - drops python 2.5 support (we are dropping pyversions in favor of ++ X-Python-Version) ++ * debian/patches: ++ - all previous are in upstream now, dropped locally ++ - added -dsc-patch'es for systems without cython3 ++ * debian/control: ++ - added python-statsmodels for the extended tests coverage ++ ++ -- Yaroslav Halchenko Mon, 17 Dec 2012 12:27:25 -0500 ++ ++pandas (0.9.1-2) unstable; urgency=low ++ ++ [ Julian Taylor ] ++ * Provide python3 packages ++ * Add autopkgtests ++ * debian/patches: ++ - relax-float-tests.patch: ++ replace float equality tests with almost equal ++ - fix-endian-tests.patch: ++ patch from upstream to fix the test failure on big endian machines ++ ++ [ Yaroslav Halchenko ] ++ * Upload to unstable ++ * Dropping pysupport ++ * debian/rules: ++ - slight reduction of code duplication between python 2 and 3 ++ - cythonize for both python 2 and 3 into separate directories ++ ++ -- Yaroslav Halchenko Sat, 01 Dec 2012 22:57:47 -0500 ++ ++pandas (0.9.1-1) experimental; urgency=low ++ ++ * New upstream release ++ * Boosted policy to 3.9.3 (no due changes) ++ * debian/rules ++ - Fixed up cleaning up of cythonized files ++ ++ -- Yaroslav Halchenko Wed, 14 Nov 2012 09:44:14 -0500 ++ ++pandas (0.9.0-1) experimental; urgency=low ++ ++ * New upstream release ++ ++ -- Yaroslav Halchenko Sun, 07 Oct 2012 21:26:23 -0400 ++ ++pandas (0.9.0~rc2-1) experimental; urgency=low ++ ++ * New upstream release candidate ++ ++ -- Yaroslav Halchenko Fri, 21 Sep 2012 10:27:52 -0400 ++ ++pandas (0.8.1-1) unstable; urgency=low ++ ++ * Primarily a bugfix upstream release. ++ * up_tag_yahoo_test_requiring_network patch removed. 
++ ++ -- Yaroslav Halchenko Sun, 22 Jul 2012 20:13:16 -0400 ++ ++pandas (0.8.0-2) unstable; urgency=medium ++ ++ * up_tag_yahoo_test_requiring_network patch cherry-picked from upstream ++ GIT so that tests would not be exercised at package build time ++ (Closes: #681449) ++ ++ -- Yaroslav Halchenko Fri, 13 Jul 2012 08:54:41 -0400 ++ ++pandas (0.8.0-1) unstable; urgency=low ++ ++ * Fresh upstream release ++ * debian/control ++ - drop python-statsmodels from Build-Depends since it might not be yet ++ available on some architectures and is not critical for the test ++ - recommend python-statsmodels instead of deprecated ++ python-scikits.statsmodels ++ ++ -- Yaroslav Halchenko Fri, 29 Jun 2012 13:02:28 -0400 ++ ++pandas (0.8.0~rc2+git26-g76c6351-1) experimental; urgency=low ++ ++ * Fresh upstream release candidate ++ - all patches dropped (upstreamed) ++ - requires numpy >= 1.6 ++ ++ -- Yaroslav Halchenko Tue, 12 Jun 2012 13:23:27 -0400 ++ ++pandas (0.7.3-1) unstable; urgency=low ++ ++ * Fresh upstream release ++ - few post-release patches (submitted upstream) to exclude unittests ++ requiring network access ++ * debian/control: ++ - python-openpyxl, python-xlwt, python-xlrd into Build-Depends ++ and Recommends ++ * debian/rules: ++ - exclude running tests marked with @network ++ ++ -- Yaroslav Halchenko Thu, 12 Apr 2012 11:27:31 -0400 ++ ++pandas (0.7.1+git1-ga2e86c2-1) unstable; urgency=low ++ ++ * New upstream release with a bugfix which followed ++ ++ -- Yaroslav Halchenko Thu, 01 Mar 2012 22:28:10 -0500 ++ ++pandas (0.7.0-1) unstable; urgency=low ++ ++ * New upstream release ++ * Updated pre-cythonized .c files for older Debian/Ubuntu releases. ++ Added a stamp file with upstream version to assure up-to-dateness ++ of the generated files ++ * Dropped all exclusions of unittests and patches -- shouldn't be necessary ++ any longer ++ * Build only for requested versions (not all supported) of Python ++ * Do nothing for build operation, rely on overloaded install ++ (to avoid undesired re-cythonization on elderly Ubuntus) ++ * Adjusted url in watch due to migration of repository under pydata ++ organization ++ ++ -- Yaroslav Halchenko Mon, 16 Jan 2012 19:31:50 -0500 ++ ++pandas (0.6.1-1) UNRELEASED; urgency=low ++ ++ * New upstream release ++ * python-tk into Build-Depends ++ * Create matplotlibrc with backend: Agg to allow tests run without $DISPLAY ++ * Carry pre-cythonized .c files for systems with older Cython ++ * Skip few tests known to fail ++ ++ -- Yaroslav Halchenko Tue, 13 Dec 2011 18:36:11 -0500 ++ ++pandas (0.5.0+git7-gcf32be2-1) unstable; urgency=low ++ ++ * New upstream release with post-release fixes ++ ++ -- Yaroslav Halchenko Tue, 01 Nov 2011 21:15:06 -0400 ++ ++pandas (0.4.3-1) unstable; urgency=low ++ ++ * New upstream release(s): primarily bugfixes and optimizations but also ++ with some minor API changes and new functionality ++ * Adjusted debian/watch to match new layout on github ++ ++ -- Yaroslav Halchenko Tue, 18 Oct 2011 11:27:50 -0400 ++ ++pandas (0.4.1-1) unstable; urgency=low ++ ++ * New upstream bugfix release ++ - incorporated all debian/patches ++ * debian/rules: 'clean' removes generated pandas/version.py ++ * debian/copyright: adjusted to become DEP-5 compliant ++ ++ -- Yaroslav Halchenko Sun, 25 Sep 2011 21:48:30 -0400 ++ ++pandas (0.4.0-1) unstable; urgency=low ++ ++ * Initial Debian release (Closes: #641464) ++ ++ -- Yaroslav Halchenko Tue, 13 Sep 2011 12:24:05 -0400 diff --cc debian/contributors_list.txt index 00000000,00000000..3c42ea90 new file mode
100644 --- /dev/null +++ b/debian/contributors_list.txt @@@ -1,0 -1,0 +1,2022 @@@ ++Current to 1.0.5 (generated by git shortlog -ns) ++There may be multiple entries for the same person if they have used more than one (form of their) name ++https://github.com/pandas-dev/pandas/graphs/contributors ++ ++ 3130 Wes McKinney ++ 3043 jreback ++ 1687 Jeff Reback ++ 1239 jbrockmendel ++ 965 Joris Van den Bossche ++ 943 y-p ++ 663 Tom Augspurger ++ 630 Chang She ++ 607 Phillip Cloud ++ 459 gfyoung ++ 344 sinhrks ++ 327 Adam Klein ++ 245 Matthew Roeschke ++ 245 Simon Hawkins ++ 204 William Ayd ++ 160 Andy Hayden ++ 149 Marc Garcia ++ 148 topper-123 ++ 130 MeeseeksMachine ++ 128 Jeff Tratner ++ 122 Jeffrey Tratner ++ 109 Vytautas Jancauskas ++ 92 Pietro Battiston ++ 90 Jeremy Schendel ++ 89 Skipper Seabold ++ 87 MomIsBestFriend ++ 85 h-vetinari ++ 84 Stephan Hoyer ++ 76 Wouter Overmeire ++ 70 jschendel ++ 67 alimcmaster1 ++ 65 Sinhrks ++ 61 TomAugspurger ++ 58 Mortada Mehyar ++ 56 Christopher Whelan ++ 56 Kevin Sheppard ++ 55 Chris ++ 54 Thomas Kluyver ++ 54 behzad nouri ++ 50 Terji Petersen ++ 49 Ka Wo Chen ++ 45 Kaiqi Dong ++ 43 Kieran O'Mahony ++ 42 Dieter Vandenbussche ++ 38 Paul Reidy ++ 34 Martin Winkel ++ 33 Evan Wright ++ 33 chris-b1 ++ 32 bwignall ++ 31 Jan Schulz ++ 30 Mak Sze Chun ++ 29 Yaroslav Halchenko ++ 29 immerrr ++ 28 Daniel Saxton ++ 28 David Stephens ++ 26 Gábor Lipták ++ 25 Justin Zheng ++ 25 Stephen Lin ++ 25 Thein Oo ++ 25 rockg ++ 23 Fabian Haase ++ 22 Licht Takeuchi ++ 22 Ming Li ++ 22 ganevgv ++ 21 Thierry Moisan ++ 20 Dan Birken ++ 20 lexual ++ 19 Abraham Flaxman ++ 19 Artemy Kolchinsky ++ 19 DSM ++ 18 Kerby Shedden ++ 18 Ryan Nazareth ++ 18 pilkibun ++ 18 unutbu ++ 17 Anthonios Partheniou ++ 17 Nicolas Bonnotte ++ 16 Matt Roeschke ++ 16 Saurav Chakravorty ++ 16 danielballan ++ 16 onesandzeroes ++ 15 Abdullah İhsan Seçer ++ 15 Tobias Brandt ++ 15 seth-p ++ 14 Christopher C. Aycock ++ 14 Dr. Irv ++ 14 Gregory Rome ++ 14 Maximilian Roos ++ 14 Roy Hyunjin Han ++ 14 Spencer Lyon ++ 14 locojaydev ++ 13 Alex Rothberg ++ 13 John W. O'Brien ++ 13 Jonathan Chambers ++ 13 Marco Gorelli ++ 13 Todd Jennings ++ 13 Vaibhav Vishal ++ 12 Alex Rychyk ++ 12 Luca Beltrame ++ 12 PKEuS ++ 12 Richard T. Guy ++ 12 Tommy ++ 12 Wenhuan ++ 11 Andrew Rosenfeld ++ 11 Daniel Himmelstein ++ 11 Joe Jevnik ++ 11 Keith Hughitt ++ 11 anmyachev ++ 11 terrytangyuan ++ 10 Aaron Critchley ++ 10 Alexander Ponomaroff ++ 10 Anjali2019 ++ 10 Dale Jung ++ 10 Garrett Drapala ++ 10 John McNamara ++ 10 K.-Michael Aye ++ 10 Martin Blais ++ 10 Nick Eubank ++ 10 Sam Foo ++ 10 cbertinato ++ 9 Albert Villanova del Moral ++ 9 Damien Garaud ++ 9 Guillaume Gay ++ 9 Jacob Schaer ++ 9 Kendall Masse ++ 9 Tong SHEN ++ 9 Viktor Kerkez ++ 8 Aly Sivji ++ 8 Brandon Bradley ++ 8 Dr-Irv ++ 8 Hugh Kelley ++ 8 Irv Lustig ++ 8 Kalyan Gokhale ++ 8 Mike Kelly ++ 8 Shawn Heide ++ 8 Tim Swast ++ 8 Uwe L. Korn ++ 8 Varun ++ 8 Vasily Litvinov ++ 8 himanshu awasthi ++ 8 jnmclarty ++ 7 Adam J. Stewart ++ 7 Benedikt Sauer ++ 7 Bharat Raghunathan ++ 7 Dan Allan ++ 7 Francis T. 
O'Donovan ++ 7 Jason Sexauer ++ 7 Jiaxiang ++ 7 Kernc ++ 7 Michael Mueller ++ 7 Pandas Development Team ++ 7 Samesh Lakhotia ++ 7 Takafumi Arakaki ++ 7 Winterflower ++ 7 Younggun Kim ++ 7 dsm054 ++ 7 nipunreddevil ++ 7 proost ++ 7 scls19fr ++ 6 ArtinSarraf ++ 6 Benjamin Rowell ++ 6 Brad Buran ++ 6 Bran Yang ++ 6 Brian Wignall ++ 6 Chris Whelan ++ 6 David Bew ++ 6 Garrett-R ++ 6 Graham Inggs ++ 6 Jeff Carey ++ 6 Joel Nothman ++ 6 Kelsey Jordahl ++ 6 Mateusz Woś ++ 6 Matt Kirk ++ 6 Neil Parley ++ 6 Nicholaus E. Halecky ++ 6 Piotr Jucha ++ 6 Rouz Azari ++ 6 Stephen Hoover ++ 6 Steve ++ 6 Thomas A Caswell ++ 6 Thomas Grainger ++ 6 Tola A ++ 6 Uddeshya Singh ++ 6 Vincent Arel-Bundock ++ 6 Wes Turner ++ 6 ZhuBaohe ++ 6 agijsberts ++ 6 agraboso ++ 6 ajcr ++ 6 clham ++ 6 ischwabacher ++ 6 timmie ++ 6 tmnhat2001 ++ 5 Adam Obeng ++ 5 Addison Lynch ++ 5 Ajay Saxena ++ 5 Alan Velasco ++ 5 Andrew ++ 5 Ben Kandel ++ 5 Chapman Siu ++ 5 Chuanzhu Xu ++ 5 Clark Fitzgerald ++ 5 Clemens Brunner ++ 5 DataOmbudsman ++ 5 David Cottrell ++ 5 Douglas McNeil ++ 5 Gjelt ++ 5 Janelle Zoutkamp ++ 5 Jon Mease ++ 5 Joris Vankerschaver ++ 5 Josh ++ 5 Ksenia ++ 5 Lars Buitinck ++ 5 LeakedMemory ++ 5 Marc Abramowitz ++ 5 Mark Wiebe ++ 5 Matti Picus ++ 5 Nicholas Musolino ++ 5 Oğuzhan Öğreden ++ 5 Pauli Virtanen ++ 5 Prabakaran Kumaresshan ++ 5 Sangwoong Yoon ++ 5 Scott E Lasley ++ 5 Tao He ++ 5 Tarbo Fukazawa ++ 5 Ted Petrou ++ 5 Tomaz Berisa ++ 5 TrigonaMinima ++ 5 Troels Nielsen ++ 5 WANG Aiyong ++ 5 William Blan ++ 5 Yuichiro Kaneko ++ 5 benjamin ++ 5 dieterv77 ++ 5 donK23 ++ 5 kernc ++ 5 pv8493013j ++ 5 realead ++ 5 tshauck ++ 5 yui-knk ++ 4 Adam Greenhall ++ 4 Alvaro Tejero-Cantero ++ 4 Andreas Würl ++ 4 Armin Varshokar ++ 4 Avi Sen ++ 4 Benjamin Thyreau ++ 4 Blake Hawkins ++ 4 Bob Haffner ++ 4 Brenda Moon ++ 4 Brett Naul ++ 4 Chalmer Lowe ++ 4 Cheuk Ting Ho ++ 4 Chris Billington ++ 4 Chris Warth ++ 4 Chris Zimmerman ++ 4 Christian Hudon ++ 4 DaanVanHauwermeiren ++ 4 Dan Miller ++ 4 Daniel Waeber ++ 4 David Adrián Cañones Castellano ++ 4 Dražen Lučanin ++ 4 Erik ++ 4 Gianluca Rossi ++ 4 Giftlin Rajaiah ++ 4 Goyo ++ 4 Gregg Lind ++ 4 Guilherme Beltramini ++ 4 Henning Sperr ++ 4 Jacques Kvam ++ 4 James Myatt ++ 4 JennaVergeynst ++ 4 Jev Kuznetsov ++ 4 Jiang Yue ++ 4 Jim Crist ++ 4 John Zwinck ++ 4 Junya Hayashi ++ 4 Kevin Stone ++ 4 Krishna ++ 4 Kyle Meyer ++ 4 Laura Collard, PhD ++ 4 Loïc Estève ++ 4 Matias Heikkilä ++ 4 Matt Wittmann ++ 4 Matthew Gilbert ++ 4 Max Chen ++ 4 Max van Deursen ++ 4 Nathan Abel ++ 4 Nicholas Ver Halen ++ 4 OXPHOS ++ 4 Olivier Grisel ++ 4 Oluokun Adedayo ++ 4 Paul Ivanov ++ 4 Robert Gieseke ++ 4 Roger Thomas ++ 4 Samuel Sinayoko ++ 4 Shane Conway ++ 4 Shivam Rana ++ 4 Sofiane Mahiou ++ 4 Srinivas Reddy Thatiparthy (శ్రీనివాస్ రెడ్డి తాటిపర్తి) ++ 4 Stephen Rauch ++ 4 Trent Hauck ++ 4 Vincent La ++ 4 Vladimir Filimonov ++ 4 Vyomkesh Tripathi ++ 4 akosel ++ 4 cel4 ++ 4 cgohlke ++ 4 chapman siu ++ 4 gliptak ++ 4 hugo ++ 4 luzpaz ++ 4 nrebena ++ 4 rbenes ++ 4 robbuckley ++ 4 waitingkuo ++ 4 wcwagner ++ 3 Aaditya Panikath ++ 3 Abhijeet Krishnan ++ 3 Adam Hooper ++ 3 Aleksey Bilogur ++ 3 Alex Alekseyev ++ 3 Alexander Buchkovsky ++ 3 Alfonso MHC ++ 3 Alp Arıbal ++ 3 Andreas Winkler ++ 3 Angelos Evripiotis ++ 3 Aniruddha Bhattacharjee ++ 3 Anjana S ++ 3 Anton I. Sipos ++ 3 Baurzhan Muftakhidinov ++ 3 Ben Welsh ++ 3 Bhavani Ravi ++ 3 Big Head ++ 3 Brandon M. 
Burroughs ++ 3 Brock Mendel ++ 3 Caleb Epstein ++ 3 Carlos Souza ++ 3 Chris Bertinato ++ 3 Chris Mazzullo ++ 3 Christoph Gohlke ++ 3 Dave Hughes ++ 3 David Krych ++ 3 Deepyaman Datta ++ 3 Dillon Niederhut ++ 3 Doug Latornell ++ 3 Eric Chea ++ 3 Francesc Via ++ 3 Galuh Sahid ++ 3 Giacomo Ferroni ++ 3 Giftlin ++ 3 Grant Smith ++ 3 Guillaume Poulin ++ 3 Hammad Mashkoor ++ 3 Haochen Wu ++ 3 Hielke Walinga ++ 3 Hubert ++ 3 Hugues Valois ++ 3 Hyukjin Kwon ++ 3 Iain Barr ++ 3 Ingolf Becker ++ 3 Israel Saeta Pérez ++ 3 Ivan Nazarov ++ 3 Jack Bicknell ++ 3 Jake VanderPlas ++ 3 Jan Rudolph ++ 3 Jan-Philip Gehrcke ++ 3 Jared Groves ++ 3 Jean Helie ++ 3 Jean-Mathieu Deschenes ++ 3 Jeroen Kant ++ 3 Jesper Dramsch ++ 3 Joel Ostblom ++ 3 John Freeman ++ 3 Joon Ro ++ 3 Josh Klein ++ 3 Josiah Baker ++ 3 Karmel Allison ++ 3 Kate Surta ++ 3 Keshav Ramaswamy ++ 3 Kyle Barron ++ 3 Liam3851 ++ 3 Louis Huynh ++ 3 Markus Meier ++ 3 Mateusz ++ 3 Matt Braymer-Hayes ++ 3 Matthew Brett ++ 3 Maximiliano Greco ++ 3 Mitar ++ 3 Myles Braithwaite ++ 3 Naomi Bonnin ++ 3 Nate Yoder ++ 3 Nick Pentreath ++ 3 Noam Hershtig ++ 3 Pamela Wu ++ 3 Patrick O'Brien ++ 3 Paul ++ 3 Prasanjit Prakash ++ 3 Pulkit Maloo ++ 3 Randy Carnevale ++ 3 Riccardo Magliocchetti ++ 3 Richard Höchenberger ++ 3 Robin ++ 3 Robin Wilson ++ 3 Roman Yurchak ++ 3 Safia Abdalla ++ 3 Sahil Dua ++ 3 Sebastian Bank ++ 3 Shorokhov Sergey ++ 3 Sparkle Russell-Puleri ++ 3 Stefania Delprete ++ 3 Stefano Cianciulli ++ 3 Stijn Van Hoey ++ 3 Thijs Damsma ++ 3 Tjerk Santegoeds ++ 3 Toby Dylan Hocking ++ 3 Tom Ajamian ++ 3 Tom Bird ++ 3 Victoria Zdanovskaya ++ 3 Will Holmgren ++ 3 Yian ++ 3 Yoshiki Vázquez Baeza ++ 3 Zac Hatfield-Dodds ++ 3 adneu ++ 3 alphaCTzo7G ++ 3 anomrake ++ 3 davidshinn ++ 3 dengemann ++ 3 duozhang ++ 3 jdeschenes ++ 3 jen w ++ 3 joshuaar ++ 3 linebp ++ 3 lukasbk ++ 3 mcjcode ++ 3 miker985 ++ 3 nathalier ++ 3 ogiaquino ++ 3 prossahl ++ 3 rekcahpassyla ++ 3 ri938 ++ 3 stonebig ++ 3 thatneat ++ 3 unknown ++ 3 zach powers ++ 2 Aaron Staple ++ 2 Adam Bull ++ 2 Adam Marcus ++ 2 Alex Chase ++ 2 Alex Gaudio ++ 2 Alex Volkov ++ 2 Alex Watt ++ 2 Alexander Michael Schade ++ 2 Allen Downey ++ 2 Andrew Burrows ++ 2 Andy ++ 2 Andy R. Terrel ++ 2 Antoine Mazières ++ 2 Artem Bogachev ++ 2 Batalex ++ 2 Ben Alex ++ 2 Ben Schiller ++ 2 Ben Thayer ++ 2 Benjamin Adams ++ 2 Bernard Willers ++ 2 Bhuvana KA ++ 2 Bill Letson ++ 2 Bobae Kim ++ 2 Camilo Cota ++ 2 Carol Willing ++ 2 Charalampos Papaloizou ++ 2 Charles David ++ 2 Chris Grinolds ++ 2 Chris Reynolds ++ 2 Chris Stadler ++ 2 Chris Stoafer ++ 2 Christer van der Meeren ++ 2 Cody ++ 2 Dan Ringwalt ++ 2 Daniel Grady ++ 2 DanielFEvans ++ 2 Data & Code Expert Experimenting with Code on Data ++ 2 Dave Hirschfeld ++ 2 Dave Willmer ++ 2 David Arcos ++ 2 David Cook ++ 2 David Gwynne ++ 2 David Stansby ++ 2 Devin Petersohn ++ 2 Diego Argueta ++ 2 Doran Deluz ++ 2 Douglas Rudd ++ 2 Dražen Lučanin ++ 2 Dror Atariah ++ 2 Eduardo Schettino ++ 2 Egor ++ 2 Egor Panfilov ++ 2 Elle ++ 2 Elliott Sales de Andrade ++ 2 Eric Chlebek ++ 2 Fabian Retkowski ++ 2 Fabian Rost ++ 2 Fabio Zanini ++ 2 Fábio Rosado ++ 2 Gabriel Corona ++ 2 Geraint Duck ++ 2 Gina ++ 2 Gioia Ballin ++ 2 Graham Jeffries ++ 2 Grant Roch ++ 2 Guillaume Horel ++ 2 Guillaume Lemaitre ++ 2 Hamed Saljooghinejad ++ 2 Hannah Ferchland ++ 2 Hassan Kibirige ++ 2 Henry Kleynhans ++ 2 HubertKl ++ 2 HyunTruth ++ 2 Hyungtae Kim ++ 2 Ian Henriksen ++ 2 Iblis Lin ++ 2 Ignacio Santolin ++ 2 Ilya V. 
Schurov ++ 2 Ivan Smirnov ++ 2 JMBurley ++ 2 Jack Kelly ++ 2 Jacopo Rota ++ 2 Jaehoon Hwang ++ 2 Jaidev Deshpande ++ 2 James Draper ++ 2 Jan Koch ++ 2 Jaume Bonet ++ 2 Javad Noorbakhsh ++ 2 Jay Parlar ++ 2 Jeff Knupp ++ 2 Jeff Mellen ++ 2 Jeffrey Gerard ++ 2 Jethro Cao ++ 2 Jimmy Callin ++ 2 Jing Qiang Goh ++ 2 Joao Victor Martinelli ++ 2 Joaq Almirante ++ 2 Joe Bradish ++ 2 John David Reaver ++ 2 John G Evans ++ 2 John Liekezer ++ 2 John-Colvin ++ 2 Jon M. Mease ++ 2 Jonas Abernot ++ 2 Jonathan deWerd ++ 2 Jordi Contestí ++ 2 Jose Quinones ++ 2 Juarez Bochi ++ 2 Julia Evans ++ 2 Julia Signell ++ 2 Julien Danjou ++ 2 Jung Dong Ho ++ 2 Justin Bozonier ++ 2 Justin Lecher ++ 2 KOBAYASHI Ittoku ++ 2 Kamil Kisiel ++ 2 Kang Yoosam ++ 2 Kara de la Marck ++ 2 Karthigeyan ++ 2 Katrin Leinweber ++ 2 Koustav Samaddar ++ 2 Kunal Gosar ++ 2 Kyle Kelley ++ 2 Kyle Prestel ++ 2 LJ ++ 2 Leif Johnson ++ 2 Leif Walsh ++ 2 Leo Razoumov ++ 2 Luca Scarabello ++ 2 Lucas Kushner ++ 2 Lucas Scarlato Astur ++ 2 Luke ++ 2 Mabel Villalba ++ 2 Mahmoud Lababidi ++ 2 Manan Pal Singh ++ 2 Manraj Singh ++ 2 Marc ++ 2 Marco Hemken ++ 2 Marco Neumann ++ 2 Margaret Sy ++ 2 Martina G. Vilas ++ 2 Mason Gallo ++ 2 Mats Maiwald ++ 2 Matt Maybeno ++ 2 Max Bolingbroke ++ 2 Maxim Veksler ++ 2 Michael ++ 2 Michael Charlton ++ 2 Michael Odintsov ++ 2 Michael Penkov ++ 2 Michael Schatzow ++ 2 Michael W Schatzow ++ 2 Miguel ++ 2 Min RK ++ 2 Mitch Negus ++ 2 Mohamed Amine ZGHAL ++ 2 Monson Shao ++ 2 Nathalie Rud ++ 2 Nathan Pinger ++ 2 Naveen Michaud-Agrawal ++ 2 Nick Chmura ++ 2 Nico Cernek ++ 2 Nicolas Dickreuter ++ 2 Nikhil Kumar Mengani ++ 2 Nipun Batra ++ 2 Oleg Shteynbuk ++ 2 Oliver Hofkens ++ 2 Ondrej Kokes ++ 2 Ondřej Čertík ++ 2 Paddy Mullen ++ 2 Pankaj Pandey ++ 2 Patrick O'Keeffe ++ 2 Paul Ganssle ++ 2 Paul Lee ++ 2 Pawel Kordek ++ 2 Peter Prettenhofer ++ 2 Philipp A ++ 2 Pierre Haessig ++ 2 Piotr Chromiec ++ 2 Piyush Aggarwal ++ 2 Prakhar Pandey ++ 2 Rafal Skolasinski ++ 2 Rajib Mitra ++ 2 Ravi Kumar Nimmi ++ 2 Rick ++ 2 Rinoc Johnson ++ 2 Rob Levy ++ 2 Robert Meyer ++ 2 Roei.r ++ 2 Rohit Sanjay ++ 2 Roman Pekar ++ 2 Ronan Lamy ++ 2 Roshni ++ 2 RuiDC ++ 2 Ruijing Li ++ 2 Ryan ++ 2 Ryan Rehman ++ 2 Sarah Donehower ++ 2 SaÅ¡o Stanovnik ++ 2 Scott Sanderson ++ 2 Seb ++ 2 Shadi Akiki ++ 2 Shahul Hameed ++ 2 Shantanu Gontia ++ 2 Sheppard, Kevin ++ 2 Sietse Brouwer ++ 2 Simon Gibbons ++ 2 Simon-Martin Schröder ++ 2 Simone Basso ++ 2 SleepingPills ++ 2 Souvik Mandal ++ 2 Soyoun(Rose) Kim ++ 2 Stefaan Lippens ++ 2 Stephen Childs ++ 2 Stephen Pascoe ++ 2 Stephen Simmons ++ 2 Steve Cook ++ 2 Sumanau Sareen ++ 2 Tan Tran ++ 2 Tanya Jain ++ 2 Thiviyan Thanapalasingam ++ 2 Thomas Li ++ 2 Tiago Antao ++ 2 Tiago Requeijo ++ 2 Tim D. Smith ++ 2 Tim Hoffmann ++ 2 Todd DeLuca ++ 2 Tomoyuki Suzuki ++ 2 Tony Lorenzo ++ 2 Tony Tao ++ 2 Travis N. 
Vaught ++ 2 Tushar Gupta ++ 2 Tushar Mittal ++ 2 Tux1 ++ 2 Tyler Reddy ++ 2 Valentin Haenel ++ 2 Varad Gunjal ++ 2 Victor Villas ++ 2 Vikram Bhandoh ++ 2 Vu Le ++ 2 Vytautas Jančauskas ++ 2 WBare ++ 2 Wilfred Hughes ++ 2 Will Ayd ++ 2 Will Furnass ++ 2 WillAyd ++ 2 Wuraola Oyewusi ++ 2 Xbar ++ 2 Yan Facai ++ 2 Yimeng Zhang ++ 2 Yoav Ram ++ 2 Yuecheng Wu ++ 2 Zach Angell ++ 2 adatasetaday ++ 2 akittredge ++ 2 ante328 ++ 2 bashtage ++ 2 bjonen ++ 2 bolkedebruin ++ 2 broessli ++ 2 cgangwar11 ++ 2 charalampos papaloizou ++ 2 conquistador1492 ++ 2 csfarkas ++ 2 dahlbaek ++ 2 danielplawrence ++ 2 dannyhyunkim ++ 2 david-liu-brattle-1 ++ 2 deflatSOCO ++ 2 discort ++ 2 dlovell ++ 2 dwkenefick ++ 2 elpres ++ 2 fjdiod ++ 2 fjetter ++ 2 froessler ++ 2 ghasemnaddaf ++ 2 ianzur ++ 2 jaimefrio ++ 2 jlamborn324 ++ 2 jmorris0x0 ++ 2 jonaslb ++ 2 keitakurita ++ 2 killerontherun1 ++ 2 kpapdac ++ 2 krsnik93 ++ 2 llllllllll ++ 2 louispotok ++ 2 mattip ++ 2 michaelws ++ 2 msund ++ 2 mtrbean ++ 2 nlepleux ++ 2 nsuresh ++ 2 nullptr ++ 2 ohad83 ++ 2 ottiP ++ 2 phaebz ++ 2 priyankjain ++ 2 qudade ++ 2 reidy-p ++ 2 roch ++ 2 rvernica ++ 2 scotthavard92 ++ 2 springcoil ++ 2 srib ++ 2 ssikdar1 ++ 2 svenharris ++ 2 taeold ++ 2 tim smith ++ 2 tobycheese ++ 2 tomneep ++ 2 tsdlovell ++ 2 tzinckgraf ++ 2 westurner ++ 2 xpvpc ++ 2 yogendrasoni ++ 1 1_x7 ++ 1 3553x ++ 1 A. Flaxman ++ 1 AJ Dyka ++ 1 AJ Pryor, Ph.D ++ 1 ARF ++ 1 Aaron Barber ++ 1 Aaron Schumacher ++ 1 Aaron Toth ++ 1 AbdealiJK ++ 1 Acanthostega ++ 1 Adam Chainz ++ 1 Adam Gleave ++ 1 Adam Kim ++ 1 Adam Klaum ++ 1 Adam Klimont ++ 1 Adam Smith ++ 1 AdamShamlian ++ 1 Adrian ++ 1 Adrian Liaw ++ 1 Adrien Emery ++ 1 Agustín Herranz ++ 1 Aivengoe ++ 1 Akash Tandon ++ 1 Alan Du ++ 1 Alan Hogue ++ 1 Alan Yee ++ 1 Alastair James ++ 1 Alejandro Giacometti ++ 1 Alejandro Hohmann ++ 1 Aleksandr Drozd ++ 1 Alessandro Amici ++ 1 Alex B ++ 1 Alex Kirko ++ 1 Alex Lubbock ++ 1 Alex Marchenko ++ 1 Alex Radu ++ 1 Alex Strick van Linschoten ++ 1 Alex Vig ++ 1 AlexTereshenkov ++ 1 Alexander Hendorf ++ 1 Alexander Hess ++ 1 Alexander Lenail ++ 1 Alexander Nordin ++ 1 Alexandre Batisse ++ 1 Alexandre Decan ++ 1 Alexis Mignon ++ 1 Alfredo Granja ++ 1 Allen Riddell ++ 1 AllenDowney ++ 1 Allison Browne ++ 1 Alok Singhal ++ 1 Alyssa Fu Ward ++ 1 Aman Thakral ++ 1 Amol ++ 1 Amol Agrawal ++ 1 Amol K ++ 1 Amol Kahat ++ 1 Andrea Bedini ++ 1 Andreas Buhr ++ 1 Andreas H. ++ 1 Andreas Klostermann ++ 1 Andreas Költringer ++ 1 Andrew Bui ++ 1 Andrew Fiore-Gartland ++ 1 Andrew Gaspari ++ 1 Andrew Gross ++ 1 Andrew Kittredge ++ 1 Andrew McPherson ++ 1 Andrew Munch ++ 1 Andrew Spott ++ 1 Andrew Wood ++ 1 Andrew 亮 ++ 1 András Novoszáth ++ 1 André Jonasson ++ 1 Andy Craze ++ 1 Andy Li ++ 1 Angela Ambroz ++ 1 Anh Le ++ 1 Aniket uttam ++ 1 Anil Kumar Pallekonda ++ 1 Ankit Dhankhar ++ 1 Anthony O'Brien ++ 1 Antoine Pitrou ++ 1 Antoine Viscardi ++ 1 Antonio Andraues Jr ++ 1 Antonio Gutierrez ++ 1 Antonio Linde ++ 1 Antonio Molina ++ 1 Antonio Quinonez ++ 1 Antti Kaihola ++ 1 Anudeep Tubati ++ 1 Arash Rouhani ++ 1 Arco Bast ++ 1 Arda Kosar ++ 1 Arjun Sharma ++ 1 Arno Veenstra ++ 1 Ashish Singal ++ 1 Ashwini Chaudhary ++ 1 Asish Mahapatra ++ 1 Austin Hackett ++ 1 Avi Kelman ++ 1 AyowoleT ++ 1 Azeez Oluwafemi ++ 1 Barry Fitzgerald ++ 1 Bart Aelterman ++ 1 Bas Nijholt ++ 1 Bastiaan ++ 1 Bayle Shanks ++ 1 Becky Sweger ++ 1 Ben ++ 1 Ben Auffarth ++ 1 Ben James ++ 1 Ben Nelson ++ 1 Ben North ++ 1 Benjamin Gross ++ 1 Benjamin Grove ++ 1 Benjamin M. 
Gross ++ 1 Benoit Paquet ++ 1 Benoit Pointet ++ 1 Benoît Vinot ++ 1 Berkay ++ 1 Bernhard Thiel ++ 1 Bhavesh Poddar ++ 1 BielStela ++ 1 Bill Chambers ++ 1 Bjorn Arneson ++ 1 Blair ++ 1 Bob Baxley ++ 1 Boris Lau ++ 1 BorisVerk ++ 1 Brandon Rhodes ++ 1 BrenBarn ++ 1 Brett Randall ++ 1 Brett Rosen ++ 1 Brian ++ 1 Brian Choi ++ 1 Brian Granger ++ 1 Brian J. McGuirk ++ 1 Brian Jacobowski ++ 1 Brian McFee ++ 1 Brian Quistorff ++ 1 Brian Tu ++ 1 Bruno P. Kinoshita ++ 1 Bryan Cutler ++ 1 Bryant Moscon ++ 1 Bryce Guinta ++ 1 C John Klehm ++ 1 C.A.M. Gerlach ++ 1 Caleb Braun ++ 1 Carl Johan ++ 1 Carlos Eduardo Moreira dos Santos ++ 1 Carlos García Márquez ++ 1 Carter Green ++ 1 Cecilia ++ 1 Cesar H ++ 1 Chankey Pathak ++ 1 Charlie Clark ++ 1 Chase Albert ++ 1 Chau Hoang ++ 1 Chitrank Dixit ++ 1 Chris Burr ++ 1 Chris Carroux ++ 1 Chris Catalfo ++ 1 Chris Filo Gorgolewski ++ 1 Chris Gilmer ++ 1 Chris Ham ++ 1 Chris Kerr ++ 1 Chris M ++ 1 Chris Mulligan ++ 1 Chris Roberts ++ 1 Chris Withers ++ 1 Christian Berendt ++ 1 Christian Chwala ++ 1 Christian Geier ++ 1 Christian Haege ++ 1 Christian Perez ++ 1 Christian Prinoth ++ 1 Christian Stade-Schuldt ++ 1 Christoph Moehl ++ 1 Christoph Paulik ++ 1 Christopher Scanlin ++ 1 Chu Qing Hao ++ 1 Cihan Ceyhan ++ 1 Clearfield Christopher ++ 1 Clemens Tolboom ++ 1 Cody Piersall ++ 1 Colin ++ 1 Connor Charles ++ 1 Constantine Glen Evans ++ 1 Cornelius Riemenschneider ++ 1 Crystal Gong ++ 1 D.S. McNeil ++ 1 Da Cheezy Mobsta ++ 1 Da Wang ++ 1 DaCoEx ++ 1 Damian Kula ++ 1 Damini Satya ++ 1 Dan Davison ++ 1 Dan Dixey ++ 1 Daniel Chen ++ 1 Daniel Frank ++ 1 Daniel Garrido ++ 1 Daniel Hrisca ++ 1 Daniel Hähnke ++ 1 Daniel Julius Lasiman ++ 1 Daniel Luis Costa ++ 1 Daniel Ni ++ 1 Daniel Sakuma ++ 1 Daniel Shapiro ++ 1 Daniel Siladji ++ 1 Darcy Meyer ++ 1 Darin Plutchok ++ 1 Dav Clark ++ 1 Dave Lewis ++ 1 David BROCHART ++ 1 David C Hall ++ 1 David Fischer ++ 1 David Hirschfeld ++ 1 David Hoese ++ 1 David Hoffman ++ 1 David John Gagne ++ 1 David Jung ++ 1 David Kelly ++ 1 David Liu ++ 1 David Lutz ++ 1 David Polo ++ 1 David Rasch ++ 1 David Read ++ 1 David Wolever ++ 1 David Zaslavsky ++ 1 DavidRosen ++ 1 Dean ++ 1 Dean Langsam ++ 1 Deepan Das ++ 1 Denis Belavin ++ 1 Dennis Kamau ++ 1 Diane Trout ++ 1 Diego Fernandez ++ 1 Diego Torres ++ 1 Dimitri ++ 1 Dimitris Spathis ++ 1 Dmitry L ++ 1 Dobatymo ++ 1 Dody Suria Wijaya ++ 1 Dominik Stanczak ++ 1 Donald Curtis ++ 1 DorAmram ++ 1 Dorothy Kabarozi ++ 1 Dorozhko Anton ++ 1 Doug Coleman ++ 1 Dr. Leo ++ 1 DrIrv ++ 1 Drew Fustin ++ 1 Drew Heenan ++ 1 Drewrey Lupton ++ 1 Dylan Dmitri Gray ++ 1 ETF ++ 1 EdAbati ++ 1 Eduardo Blancas Reyes ++ 1 Ehsan Azarnasab ++ 1 Eliza Mae Saret ++ 1 Elliot Marsden ++ 1 Elliot S ++ 1 Endre Mark Borza ++ 1 Ennemoser Christoph ++ 1 Enrico Rotundo ++ 1 Eric Boxer ++ 1 Eric Brassell ++ 1 Eric Kisslinger ++ 1 Eric O. LEBIGOT (EOL) ++ 1 Eric Stein ++ 1 Eric Wieser ++ 1 Eric Wong ++ 1 Erik Fredriksen ++ 1 Erik M. 
Bray ++ 1 Erik Nilsson ++ 1 Ernesto Freitas ++ 1 EternalLearner42 ++ 1 Eunseop Jeong ++ 1 Evan ++ 1 Evan Livelo ++ 1 Eyden Villanueva ++ 1 FAN-GOD ++ 1 Fabien Aulaire ++ 1 Fabrizio Pollastri ++ 1 Fakabbir Amin ++ 1 Fei Phoon ++ 1 Felix Divo ++ 1 Felix Lawrence ++ 1 Felix Marczinowski ++ 1 Fer Sar ++ 1 Fernando Margueirat ++ 1 Fernando Perez ++ 1 Filip Ter ++ 1 Flavien Lambert ++ 1 Florian Müller ++ 1 Florian Rathgeber ++ 1 Florian Wilhelm ++ 1 Floris Kint ++ 1 ForTimeBeing ++ 1 Forbidden Donut ++ 1 FragLegs ++ 1 Francesc Alted ++ 1 Francesco Brundu ++ 1 Francesco Truzzi ++ 1 Frank Cleary ++ 1 Frank Hoang ++ 1 Frank Pinter ++ 1 Frans van Dunné ++ 1 Fumito Hamamura ++ 1 Gabe F ++ 1 Gabe Fernando ++ 1 Gabi Davar ++ 1 Gabriel Araujo ++ 1 Gabriel Monteiro ++ 1 Gabriel Reid ++ 1 Gabriel de Maeztu ++ 1 Gaibo Zhang ++ 1 Gaëtan de Menten ++ 1 George Kuan ++ 1 Georgi Baychev ++ 1 German Gomez-Herrero ++ 1 Gianpaolo Macario ++ 1 Gilberto Olimpio ++ 1 GiuPassarelli ++ 1 Giulio Pepe ++ 1 Giuseppe Romagnuolo ++ 1 Gordon Blackadder ++ 1 Gosuke Shibahara ++ 1 Gouthaman Balaraman ++ 1 Graham R. Jeffries ++ 1 Graham Taylor ++ 1 Greg Reda ++ 1 Greg Williams ++ 1 Gregory Livschitz ++ 1 Grigorios Giannakopoulos ++ 1 Grzegorz Konefał ++ 1 Guilherme Leite ++ 1 Guilherme Salomé ++ 1 Guilherme Samora ++ 1 Guillem Borrell ++ 1 Gyeongjae Choi ++ 1 HHest ++ 1 HagaiHargil ++ 1 Haleemur Ali ++ 1 Hamish Pitkeathly ++ 1 Hanmin Qin ++ 1 Hans ++ 1 Hao Wu ++ 1 Harsh Nisar ++ 1 Harshit Patni ++ 1 Harutaka Kawamura ++ 1 Hassan Shamim ++ 1 Hatem Nassrat ++ 1 Hendrik Makait ++ 1 Henry Hammond ++ 1 Hissashi Rocha ++ 1 How Si Wei ++ 1 Hsiaoming Yang ++ 1 Huan Li ++ 1 Hugo Herter ++ 1 Huize Wang ++ 1 Hussain Tamboli ++ 1 Ian Dunn ++ 1 Ian Eaves ++ 1 Ian Hoegen ++ 1 Ibrahim Sharaf ElDen ++ 1 Ignacio Vergara Kausel ++ 1 Ignasi Fosch ++ 1 Igor Conrado Alves de Lima ++ 1 Igor Filippov ++ 1 Igor Shelvinskyi ++ 1 Illia Polosukhin ++ 1 Imanflow ++ 1 Inevitable-Marzipan ++ 1 Isaac Schwabacher ++ 1 Isaac Slavitt ++ 1 Isaac Virshup ++ 1 IsvenC ++ 1 Iulius Curt ++ 1 Iva Koevska ++ 1 Iva Miholic ++ 1 Ivan Bessarabov ++ 1 Iván Vallés Pérez ++ 1 JElfner ++ 1 Jackie Leng ++ 1 Jacob Buckheit ++ 1 Jacob Bundgaard ++ 1 Jacob Howard ++ 1 Jacob Wasserman ++ 1 Jake Torcasso ++ 1 Jakob Jarmar ++ 1 Jakub Nowacki ++ 1 James Bourbeau ++ 1 James Casbon ++ 1 James Cobon-Kerr ++ 1 James Goppert ++ 1 James Hiebert ++ 1 James McBride ++ 1 James Santucci ++ 1 James Winegar ++ 1 Jan F-F ++ 1 Jan Novotný ++ 1 Jan Pipek ++ 1 Jan Wagner ++ 1 Jan Werkmann ++ 1 Jan Å koda ++ 1 Jarrod Millman ++ 1 Jason Bandlow ++ 1 Jason Kiley ++ 1 Jason Swails ++ 1 Jason Wolosonovich ++ 1 Jasper J.F. van den Bosch ++ 1 Javad ++ 1 Jay Alammar ++ 1 Jay Bourque ++ 1 Jay Offerdahl ++ 1 Jayanth Katuri ++ 1 Jean-Baptiste Schiratti ++ 1 Jeff Blackburne ++ 1 Jeff Hammerbacher ++ 1 Jeffrey Starr ++ 1 Jenn Olsen ++ 1 Jeongmin Yu ++ 1 Jeremy Wagner ++ 1 Jerod Estapa ++ 1 Jesse Farnham ++ 1 Jesse Pardue ++ 1 Jihyung Moon ++ 1 Jim ++ 1 Jim Jeon ++ 1 JimStearns206 ++ 1 Jimmy Woo ++ 1 Jinyang Zhou ++ 1 Joachim Wagner ++ 1 Joan Martin Miralles ++ 1 Joerg Rittinger ++ 1 Joeun Park ++ 1 Johan von Forstner ++ 1 John ++ 1 John Cant ++ 1 John Evans ++ 1 John Fremlin ++ 1 John Tucker ++ 1 John Ward ++ 1 Johnny ++ 1 Johnny Chiu ++ 1 Johnny Gill ++ 1 Johnny Metz ++ 1 Jon Crall ++ 1 Jonas ++ 1 Jonas Buyl ++ 1 Jonas Hoersch ++ 1 Jonas Schulze ++ 1 Jonathan J. 
Helmus ++ 1 Jonathan Larkin ++ 1 Jonathan Whitmore ++ 1 Jonathan de Bruin ++ 1 Jonathon Vandezande ++ 1 Jongwony ++ 1 Joost Kranendonk ++ 1 Jop Vermeer ++ 1 Jordan Erenrich ++ 1 Jorge López Fueyo ++ 1 Joschka zur Jacobsmühlen ++ 1 Jose Rivera-Rubio ++ 1 JosephWagner ++ 1 Josh Dimarsky ++ 1 Josh Friedlander ++ 1 Josh Howes ++ 1 Josh Levy-Kramer ++ 1 Josh Owen ++ 1 Joshua Bradt ++ 1 Joshua Leahy ++ 1 Joshua Smith ++ 1 Joshua Storck ++ 1 José F. R. Fonseca ++ 1 Jovixe ++ 1 Jozef Brandys ++ 1 Julian Kuhlmann ++ 1 Julian Santander ++ 1 Julien Marrec ++ 1 Julio Martinez ++ 1 Jun ++ 1 Jun Kim ++ 1 Juraj Niznan ++ 1 Justin Berka ++ 1 Justin C Johnson ++ 1 Justin Cole ++ 1 Justin Lin ++ 1 Justin Solinsky ++ 1 Jörg Döpfert ++ 1 Júlio ++ 1 Kacawi ++ 1 Kamal Kamalaldin ++ 1 Kamil Sindi ++ 1 Kane ++ 1 Kapil Patel ++ 1 Karel De Brabandere ++ 1 Karl Dunkle Werner ++ 1 Karmanya Aggarwal ++ 1 Karrie Kehoe ++ 1 Kassandra Keeton ++ 1 Katherine Surta ++ 1 Katherine Younglove ++ 1 Katie Atkinson ++ 1 Kee Chong Tan ++ 1 Keiron Pizzey ++ 1 Keith Kraus ++ 1 Keith Webber ++ 1 Keming Zhang ++ 1 Ken Van Haren ++ 1 Kenneth ++ 1 Kevin Kuhl ++ 1 Kevin Markham ++ 1 Kevin Nguyen ++ 1 Kimi Li ++ 1 Kirk Hansen ++ 1 Kisekka David ++ 1 Kodi Arfer ++ 1 Koushik ++ 1 Kristian Holsheimer ++ 1 Krzysztof Chomski ++ 1 Ksenia Bobrova ++ 1 Ksenia Gueletina ++ 1 Kurtis Kerstein ++ 1 Kyle ++ 1 Kyle Boone ++ 1 Kyle Hausmann ++ 1 Kyle Kosic ++ 1 Kyle McCahill ++ 1 Laksh Arora ++ 1 Larry Ren ++ 1 Laurens Geffert ++ 1 Laurent Gautier ++ 1 Leon Yin ++ 1 Li Jin ++ 1 Liam Marshall ++ 1 Line Pedersen ++ 1 LiuSeeker ++ 1 Liudmila ++ 1 Lorenzo Bolla ++ 1 Lorenzo Cestaro ++ 1 Lorenzo Stella ++ 1 Loïc Séguin-C ++ 1 Luca Donini ++ 1 Luciano Viola ++ 1 Ludovico Russo ++ 1 Luis Ortiz ++ 1 Luiz Gustavo ++ 1 Lukasz ++ 1 Luke Lee ++ 1 Luke Shepard ++ 1 Luo Yicheng ++ 1 MKhalusova ++ 1 Maarten Rietbergen ++ 1 Mac ++ 1 Maciej J ++ 1 Magnus Jöud ++ 1 Mahdi Ben Jelloul ++ 1 Makarov Andrey ++ 1 Malgorzata Turzanska ++ 1 Manu NALEPA ++ 1 Manuel Leonhardt ++ 1 Manuel Riel ++ 1 Maoyuan Liu ++ 1 Marco Farrugia ++ 1 Maria del Mar Bibiloni ++ 1 Mario Corchero ++ 1 Marius Potgieter ++ 1 Mark Mandel ++ 1 Mark O'Leary ++ 1 Mark Roth ++ 1 Mark Sikora ++ 1 Mark Woodbridge ++ 1 Marlene Silva Marchena ++ 1 MarsGuy ++ 1 Martin Babka ++ 1 Martin Journois ++ 1 MasonGallo ++ 1 MatanCohe ++ 1 Mathew Topper ++ 1 Matt Bark ++ 1 Matt Boggess ++ 1 Matt Cooper ++ 1 Matt Gambogi ++ 1 Matt Savoie ++ 1 Matt Suggit ++ 1 Matt Williams ++ 1 MattRijk ++ 1 Matthew Kirk ++ 1 Matthew Lurie ++ 1 Matthew Rocklin ++ 1 Matthew Tan ++ 1 Matthias Bussonnier ++ 1 Matthias Kuhn ++ 1 Matthieu Brucher ++ 1 Matti Airas ++ 1 Max Chang ++ 1 Max Grender-Jones ++ 1 Max Kanter ++ 1 Max Kovalovs ++ 1 Max Mikhaylov ++ 1 MaxU ++ 1 Mayank Asthana ++ 1 Mehmet Ali "Mali" Akmanalp ++ 1 Michael Davis ++ 1 Michael Droettboom ++ 1 Michael E. Gruen ++ 1 Michael Felt ++ 1 Michael Gasvoda ++ 1 Michael Lamparski ++ 1 Michael P. 
Moran ++ 1 Michael Röttger ++ 1 Michael Scherer ++ 1 Michael Selik ++ 1 Michael Silverstein ++ 1 Michael Waskom ++ 1 Michael-J-Ward ++ 1 Michelangelo D'Agostino ++ 1 Michiel Stock ++ 1 Mickaël Schoentgen ++ 1 Mie~~~ ++ 1 Miguel Sánchez de León Peque ++ 1 Mike Cramblett ++ 1 Mike Graham ++ 1 Mike Kutzma ++ 1 Mikolaj Chwalisz ++ 1 Milo ++ 1 Min ho Kim ++ 1 MinGyo Jung ++ 1 MinRK ++ 1 Miroslav Å edivý ++ 1 Misha Veldhoen ++ 1 Mohit Anand ++ 1 Montana Low ++ 1 Moonsoo Kim ++ 1 Morgan Stuart ++ 1 Morgan243 ++ 1 Moritz Münst ++ 1 Morton Fox ++ 1 Moussa Taifi ++ 1 Muhammad Haseeb Tariq ++ 1 Mukul Ashwath Ram ++ 1 MusTheDataGuy ++ 1 Mykola Golubyev ++ 1 Nanda H Krishna ++ 1 Nate George ++ 1 Nathan Ford ++ 1 Nathan Sanders ++ 1 Nathan Typanski ++ 1 Navreet Gill ++ 1 Nehil Jain ++ 1 Nicholas Stahl ++ 1 Nicholas Ursa ++ 1 Nick Burns ++ 1 Nick Foti ++ 1 Nick Garvey ++ 1 Nick Stahl ++ 1 Nigel Markey ++ 1 Nikoleta Glynatsi ++ 1 Nikos Karagiannakis ++ 1 Nipun Sadvilkar ++ 1 Nis Martensen ++ 1 Noah ++ 1 Noah Spies ++ 1 Nolan Nichols ++ 1 Noora Husseini ++ 1 Noritada Kobayashi ++ 1 Noémi Éltető ++ 1 Oktay Sabak ++ 1 Olivier Bilodeau ++ 1 Olivier Harris ++ 1 Onno Eberhard ++ 1 Osman ++ 1 P-Tillmann ++ 1 Pablo Ambrosio ++ 1 Pan Deng / Zora ++ 1 Parfait G ++ 1 Pastafarianist ++ 1 Patrick Luo ++ 1 Patrick O'Melveny ++ 1 Patrick Park ++ 1 Patrik Hlobil ++ 1 Paul Mannino ++ 1 Paul Masurel ++ 1 Paul Mestemaker ++ 1 Paul Reiners ++ 1 Paul Siegel ++ 1 Paul van Mulbregt ++ 1 Paula ++ 1 Paulo Roberto de Oliveira Castro ++ 1 Pav A ++ 1 Peng Yu ++ 1 Pepe Flores ++ 1 Pete Huang ++ 1 Peter ++ 1 Peter Csizsek ++ 1 Peter Hoffmann ++ 1 Peter Quackenbush ++ 1 Peter Waller ++ 1 Peter Yanovich ++ 1 Petio Petrov ++ 1 Petr Baev ++ 1 Petra Chong ++ 1 Phil Ngo ++ 1 Phil Ruffwind ++ 1 Phil Schaf ++ 1 Philip Gura ++ 1 Philippe Ombredanne ++ 1 Prabhjot Singh ++ 1 Pradyumna Reddy Chinthala ++ 1 Pranav Suri ++ 1 Pratap Vardhan ++ 1 Priyanka Ojha ++ 1 Puneeth K ++ 1 Pyry Kovanen ++ 1 README Bot ++ 1 Raghav ++ 1 RahulHP ++ 1 Rajat ++ 1 Rajhans Jadhao ++ 1 Rajiv Bharadwaj ++ 1 Ralph Bean ++ 1 Ray Bell ++ 1 Redonnet Louis ++ 1 RenzoBertocchi ++ 1 Rhys Parry ++ 1 Richard Eames ++ 1 Richard Lewis ++ 1 Ridhwan Luthra ++ 1 Rishipuri ++ 1 Rob Forgione ++ 1 Rob deCarvalho ++ 1 Robert ++ 1 Robert Bradshaw ++ 1 Robert Gibboni ++ 1 Robert Kern ++ 1 Robin Kiplang'at ++ 1 RobinFiveWords ++ 1 Rodolfo Fernandez ++ 1 Rohan Pandit ++ 1 Rok Mihevc ++ 1 RomainSa ++ 1 Roman Imankulov ++ 1 Roman Khomenko ++ 1 Ross Petchler ++ 1 Roy Keyes ++ 1 Roymprog ++ 1 Rupert Thompson ++ 1 Rushabh Vasani ++ 1 Russell Smith ++ 1 Ryan Grout ++ 1 Ryan Hendrickson ++ 1 Ryan Joyce ++ 1 Ryszard T. 
Kaleta ++ 1 Rémy Léone ++ 1 Rüdiger Busche ++ 1 SEUNG HOON, SHIN ++ 1 Sakar Panta ++ 1 Sam Cohan ++ 1 Sam Zhang ++ 1 Sami Salonen ++ 1 Samir Musali ++ 1 Samuel Denny ++ 1 Samyak Jain ++ 1 Sandeep Pathak ++ 1 Sandrine Pataut ++ 1 Sanghee Kim ++ 1 Sangmin Park ++ 1 Sanjiv Lobo ++ 1 Santosh Kumar ++ 1 Sarah Bird ++ 1 Sarah Masud ++ 1 SarahJessica ++ 1 Sarma Tangirala ++ 1 Saul Shanabrook ++ 1 Saumitra Shahapure ++ 1 Schaer, Jacob C ++ 1 Scott Cole ++ 1 Scott Lasley ++ 1 Scott McAllister ++ 1 Scott Talbert ++ 1 SdgJlbl ++ 1 Sean Chan ++ 1 Sebastian Gsänger ++ 1 Sebastian Pölsterl ++ 1 Sebastian Rubbert ++ 1 Senthil Palanisami ++ 1 Sereger13 ++ 1 Sergei Ivko ++ 1 Sergey Kopylov ++ 1 Sergio Pascual ++ 1 Shannon Wang ++ 1 Sharad Vijalapuram ++ 1 Shashank Agarwal ++ 1 Shengpu Tang ++ 1 Shirish Kadam ++ 1 Shubham Chaudhary ++ 1 Shyam Saladi ++ 1 SiYoungOh ++ 1 Siddhesh Poyarekar ++ 1 Sidharthan Nair ++ 1 Simon Riddell ++ 1 SimonBaron ++ 1 Siu Kwan Lam ++ 1 Sourav kumar ++ 1 Soyoun Kim ++ 1 Spencer Carrucciu ++ 1 SplashDance ++ 1 Stefan van der Walt ++ 1 Stefano Miccoli ++ 1 Sten ++ 1 Stephen Cowley ++ 1 Stephen Kappel ++ 1 StephenVoland ++ 1 Sterling Paramore ++ 1 Steve Baker ++ 1 Steve Choi ++ 1 Steve Dower ++ 1 Steven ++ 1 Steven Cutting ++ 1 Stewart Henderson ++ 1 Stuart Berg ++ 1 Stéphan Taljaard ++ 1 Sudarshan Konge ++ 1 Sudeep ++ 1 Sumin Byeon ++ 1 Sven ++ 1 Sylvain Corlay ++ 1 Sylvia ++ 1 Szymon Bednarek ++ 1 Sébastien de Menten ++ 1 Sören ++ 1 T N ++ 1 Taavi Burns ++ 1 Takuya N ++ 1 Talitha Pumar ++ 1 Tamas Nagy ++ 1 Tambe Tabitha Achere ++ 1 Tang Heyi ++ 1 Tanmay Daripa ++ 1 Tara Adiseshan ++ 1 Telt ++ 1 Terry Santegoeds ++ 1 Thiago Cordeiro da Fonseca ++ 1 Thiago Serafim ++ 1 ThibTrip ++ 1 Thomas Kluiters ++ 1 Thomas Lentali ++ 1 Thomas Wiecki ++ 1 Thoralf Gutierrez ++ 1 Thouis (Ray) Jones ++ 1 Thrasibule ++ 1 Tilen Kusterle ++ 1 Tim Akinbo ++ 1 Tim Cera ++ 1 Tim Gates ++ 1 Tim Hopper ++ 1 Tim McNamara ++ 1 Tirth Jain ++ 1 Tobias Gustafsson ++ 1 Tom Farnbauer ++ 1 Tom Neep ++ 1 Tomasz Kluczkowski ++ 1 Tommy Lynch ++ 1 Tomáš Chvátal ++ 1 Tong Shen ++ 1 Travis ++ 1 Triple0 ++ 1 Tuan ++ 1 Tuhin Mahmud ++ 1 Tulio Casagrande ++ 1 Unprocessable ++ 1 Upkar Lidder ++ 1 Utkarsh Upadhyay ++ 1 Uwe ++ 1 Uwe Hoffmann ++ 1 Vadym Barda ++ 1 Vibhu Agarwal ++ 1 Victor Chaves ++ 1 Victor Maryama ++ 1 Victor Salgado ++ 1 Vijayant ++ 1 Vikram Shirgur ++ 1 Vikramjeet Das ++ 1 Vince W ++ 1 Vincent Davis ++ 1 Vinícius Figueiredo ++ 1 Vipin Kumar ++ 1 Vishwak Srinivasan ++ 1 Vitória Helena ++ 1 Vivek ++ 1 Vladislav ++ 1 Vyom Jain ++ 1 Víctor Moron Tejero ++ 1 Weiwen Gu ++ 1 Weston Renoud ++ 1 Wieland Hoffmann ++ 1 Wiktor Tomczak ++ 1 Wil Tan ++ 1 Will Thompson ++ 1 William Hogman ++ 1 Wilson Lin ++ 1 Winand ++ 1 Wouter De Coster ++ 1 XF ++ 1 Xiang Zhang ++ 1 YG-Riku ++ 1 YaOzI ++ 1 Yadunandan ++ 1 Yash Shukla ++ 1 Yasin A ++ 1 Yee Mey ++ 1 Yeojin Kim ++ 1 Yeongseon Choe ++ 1 Yevgeniy Grechka ++ 1 Yi Liu ++ 1 Yitzhak Andrade ++ 1 Yoann Goular ++ 1 Yoong Kang Lim ++ 1 Yosuke Nakabayashi ++ 1 Young Joong Kim ++ 1 Yu Wang ++ 1 Yuan Tang (Terry) ++ 1 Yuliya Dovzhenko ++ 1 Yulong Yang ++ 1 Yury Bayda ++ 1 Yusei Tahara ++ 1 Yuval Langer ++ 1 Zach Dwiel ++ 1 Zeke ++ 1 Zihao Zhao ++ 1 aaron315 ++ 1 abaldenko ++ 1 abarber4gh ++ 1 aberres ++ 1 acorbe ++ 1 adrian-stepien ++ 1 aeltanawy ++ 1 aernlund ++ 1 agustín méndez ++ 1 ailchau ++ 1 ajenkins-cargometrics ++ 1 akielbowicz ++ 1 alex argunov ++ 1 alex arsenovic ++ 1 alexander135 ++ 1 alexandercbooth ++ 1 alinde1 ++ 1 amphy ++ 1 amuta ++ 1 andymaheshw ++ 1 ankostis ++ 1 
anton-d ++ 1 araraonline ++ 1 atbd ++ 1 austinc ++ 1 avelineg ++ 1 aviolov ++ 1 azuranski ++ 1 azure-pipelines[bot] ++ 1 babakkeyvani ++ 1 bastewart ++ 1 benarthur91 ++ 1 bertrandhaut ++ 1 bganglia ++ 1 bk521234 ++ 1 bkandel ++ 1 bmagnusson ++ 1 bmu ++ 1 boombard ++ 1 bpraggastis ++ 1 bravech ++ 1 brian-pantano ++ 1 c123w ++ 1 carlosdanielcsantos ++ 1 cclauss ++ 1 chaimdemulder ++ 1 chappers ++ 1 charlie0389 ++ 1 chebee7i ++ 1 chernrick ++ 1 chinhwee ++ 1 chinskiy ++ 1 chromy ++ 1 claudiobertoldi ++ 1 cmazzullo ++ 1 conmai ++ 1 cr3 ++ 1 cruzzoe ++ 1 cxl923cc ++ 1 cyrusmaher ++ 1 d10genes ++ 1 dajcs ++ 1 dalgarno ++ 1 daniel ++ 1 daniellebrown ++ 1 darke-spirits ++ 1 david ++ 1 davidjameshumphreys ++ 1 davidmvalente ++ 1 davidovitch ++ 1 daydreamt ++ 1 derestle-htwg ++ 1 dgram0 ++ 1 dickreuter ++ 1 dkamm ++ 1 dmanikowski-reef ++ 1 doosik_bae ++ 1 dr-leo ++ 1 dubourg ++ 1 dylanchase ++ 1 economy ++ 1 eduardo naufel schettino ++ 1 ejnens ++ 1 elrubio ++ 1 emilydolson ++ 1 endenis ++ 1 engstrom ++ 1 enisnazif ++ 1 est271 ++ 1 euri10 ++ 1 evangelineliu ++ 1 ezcitron ++ 1 fabriziop ++ 1 faic ++ 1 fding253 ++ 1 fengyqf ++ 1 fivemok ++ 1 fl4p ++ 1 fleimgruber ++ 1 floydsoft ++ 1 flying-sheep ++ 1 francisco souza ++ 1 funnycrab ++ 1 gabrielclow ++ 1 ganego ++ 1 garanews ++ 1 gfr ++ 1 goldenbull ++ 1 guygoldberg ++ 1 gwrome ++ 1 hack-c ++ 1 haison ++ 1 hannah-c ++ 1 harisbal ++ 1 harshul1610 ++ 1 hasnain2808 ++ 1 hcontrast ++ 1 heckeop ++ 1 helger ++ 1 henriqueribeiro ++ 1 herrfz ++ 1 hesham.shabana@hotmail.com ++ 1 hhuuggoo ++ 1 hironow ++ 1 hongshaoyang ++ 1 hshimizu77 ++ 1 hsperr ++ 1 huashuai ++ 1 hunterowens ++ 1 iamshwin ++ 1 iamsimha ++ 1 ian ++ 1 ignamv ++ 1 igorfassen ++ 1 iulia ++ 1 jackieleng ++ 1 jalazbe ++ 1 jalbritt ++ 1 jamesoliverh ++ 1 jaredsnyder ++ 1 jayfoad ++ 1 jazzmuesli ++ 1 jebob ++ 1 jeps-journal ++ 1 jeschwar ++ 1 jfoo ++ 1 jh-wu ++ 1 jjames34 ++ 1 jkovacevic ++ 1 jniznan ++ 1 joaoavf ++ 1 joders ++ 1 jojomdt ++ 1 josham ++ 1 joy-rosie ++ 1 jsexauer ++ 1 juan huguet ++ 1 juricast ++ 1 justinchan23 ++ 1 kaustuv deolal ++ 1 kdiether ++ 1 kiwirob ++ 1 kjford ++ 1 klonuo ++ 1 knuu ++ 1 kotrfa ++ 1 kpflugshaupt ++ 1 krey ++ 1 ksanghai ++ 1 l736x ++ 1 larvian ++ 1 leerssej ++ 1 lenolib ++ 1 lexy-lixinyu ++ 1 lgautier ++ 1 lloydkirk ++ 1 lodagro ++ 1 lrjball ++ 1 lucas ++ 1 lucyleeow ++ 1 maheshbapatu ++ 1 majiang ++ 1 manikbhandari ++ 1 manu ++ 1 manuels ++ 1 marcosrullan ++ 1 matthiashuschle ++ 1 mattrijk ++ 1 maxalbert ++ 1 maximilianr ++ 1 maxwasserman ++ 1 mazayo ++ 1 mck619 ++ 1 mcocdawc ++ 1 mdeboc ++ 1 mgilbert ++ 1 miguelmorin ++ 1 mikebailey ++ 1 miquelcamprodon ++ 1 mjlove12 ++ 1 monicaBee ++ 1 mpuels ++ 1 mschmohl ++ 1 mwaskom ++ 1 naveenkaushik2504 ++ 1 nicolab100 ++ 1 nileracecrew ++ 1 nmartensen ++ 1 nprad ++ 1 nuffe ++ 1 ojdo ++ 1 omtinez ++ 1 orereta ++ 1 pajachiet ++ 1 pallav-fdsi ++ 1 pandas-docs-bot ++ 1 parchd-1 ++ 1 paul-mannino ++ 1 pbreach ++ 1 peadarcoyle ++ 1 peterpanmj ++ 1 philipphanemann ++ 1 pijucha ++ 1 pmaxey83 ++ 1 pqzx ++ 1 ptype ++ 1 rafarui ++ 1 raguiar2 ++ 1 ranarag ++ 1 raph-m ++ 1 ratijas ++ 1 rdk1024 ++ 1 readyready15728 ++ 1 rhstanton ++ 1 ribonoous ++ 1 rmihael ++ 1 rmunjal29 ++ 1 robertzk ++ 1 rs2 ++ 1 ruiann ++ 1 s-weigand ++ 1 sakkemo ++ 1 samghelms ++ 1 sangarshanan ++ 1 sanguineturtle ++ 1 sardonick ++ 1 saskakarsi ++ 1 saurav2608 ++ 1 scriptomation ++ 1 seales ++ 1 sfoo ++ 1 shaido987 ++ 1 shawnbrown ++ 1 sideeye ++ 1 silentquasar ++ 1 skwbc ++ 1 someben ++ 1 stahlous ++ 1 stas-sl ++ 1 stefansimik ++ 1 step4me ++ 1 stephenwlin ++ 1 
steveayers124 ++ 1 stijnvanhoey ++ 1 sudhir mohanraj ++ 1 surveymedia.ca ++ 1 svaksha ++ 1 syutbai ++ 1 tadashigaki ++ 1 tadeja ++ 1 tamuhey ++ 1 testvinder ++ 1 thauck ++ 1 the-nose-knows ++ 1 theandygross ++ 1 themrmax ++ 1 thuske ++ 1 timcera ++ 1 tlaytongoogle ++ 1 tom-alcorn ++ 1 tomascassidy ++ 1 tomrod ++ 1 tsvikas ++ 1 tv3141 ++ 1 tworec ++ 1 verakai ++ 1 vkk800 ++ 1 vytas ++ 1 wandersoncferreira ++ 1 watercrossing ++ 1 wavedatalab ++ 1 willweil ++ 1 winlu ++ 1 xgdgsc ++ 1 yehia67 ++ 1 yelite ++ 1 yhaque1213 ++ 1 ym-pett ++ 1 yrhooke ++ 1 ysau ++ 1 ywpark1 ++ 1 zachcp ++ 1 zertrin ++ 1 zhanghui ++ 1 zhangjinjie ++ 1 zhezherun ++ 1 znmean ++ 1 zys5945 ++ 1 zzgao ++ 1 Åsmund Hjulstad ++ 1 Øystein S. Haaland ++ 1 颜发才(Yan Facai) ++ ++Debian packaging ++ 323 Yaroslav Halchenko ++ 183 Rebecca N. Palmer ++ 49 Mo Zhou ++ 25 Andreas Tille ++ 25 Graham Inggs ++ 4 Dmitry Shachnev ++ 2 Julian Taylor ++ 2 Yaroslav O Halchenko ++ 1 Diane Trout ++ 1 Ole Streicher diff --cc debian/control index 00000000,00000000..1c7915da new file mode 100644 --- /dev/null +++ b/debian/control @@@ -1,0 -1,0 +1,179 @@@ ++Source: pandas ++Section: python ++Priority: optional ++Maintainer: Debian Science Team ++Uploaders: Yaroslav Halchenko , ++ Michael Hanke , ++ Rebecca N. Palmer ++Build-Depends: debhelper-compat (= 13), ++ dh-python, ++ locales-all, ++ python3-all-dev, ++ cython3 (>= 0.29.13~), ++ python3-bottleneck (>= 1.2.1~) , ++ python3-bs4 , ++ python3-dask (>= 2.10.1~) , ++ python3-dateutil, ++ python3-html5lib , ++ python3-hypothesis , ++ python3-jinja2 , ++ python3-lxml , ++ python3-matplotlib [!hurd-i386], ++# armel, ppc64el, s390x numba crash, mipsel gives wrong answer, most ports don't have numba ++ python3-numba (>= 0.46.0~) [amd64 i386] , ++ python3-numexpr (>= 2.6.2~) , ++ python3-numpy, ++ python3-odf , ++ python3-openpyxl , ++# upstream actually want pytest >= 5.0.1 but we don't have that ++ python3-pytest (>= 4.0.2~) , ++# #969050 python3-pytest-asyncio , ++ python3-pytest-forked , ++ python3-pytest-xdist (>= 1.21~) , ++ python3-scipy, ++ python3-setuptools, ++ python3-six, ++ python3-tables (>= 3.4.2~) , ++# too old in Debian python3-tabulate (>= 0.8.3~) , ++ python3-tk , ++ python3-tz , ++ python3-xlrd , ++ python3-xlsxwriter , ++ python3-xlwt , ++ xvfb , ++ xauth , ++ xclip , ++Build-Depends-Indep: ++ python3-sphinx , ++ python3-ipykernel , ++ python3-nbconvert (>= 5.4.1~) , ++ python3-nbsphinx , ++ python3-numpydoc , ++ ipython3 (>= 7.11.1~) , ++ jdupes , ++# for style.ipynb ++ pandoc , ++# for intersphinx inventories ++ python3-doc , ++ python-numpy-doc , ++ python-scipy-doc , ++ python-matplotlib-doc , ++ python-statsmodels-doc , ++# these are for not having (as many) exception messages in documentation examples ++# so may be temporarily removed if they are broken or to break bootstrap cycles ++# not in Debian (not to be confused with python3-arrow) python3-pyarrow , ++ python3-ipywidgets , ++ python3-rpy2 , ++ python3-seaborn , ++ python3-sqlalchemy , ++ python3-statsmodels , ++ python3-xarray , ++Standards-Version: 4.5.0 ++Rules-Requires-Root: no ++Homepage: https://pandas.pydata.org/ ++Vcs-Browser: https://salsa.debian.org/science-team/pandas ++Vcs-Git: https://salsa.debian.org/science-team/pandas.git ++ ++Package: python3-pandas ++Architecture: all ++Depends: ${misc:Depends}, ${python3:Depends}, ++ python3-numpy (>= 1:1.15~), ++ python3-dateutil, ++ python3-pandas-lib (>= ${source:Version}), ++ python3-pkg-resources, ++ python3-six, ++Recommends: python3-scipy, ++ python3-matplotlib, ++ 
python3-tz, ++# for faster processing ++# see -lib for python3-numba ++ python3-bottleneck, ++ python3-numexpr, ++# for spreadsheet I/O ++ python3-odf, ++ python3-openpyxl, ++ python3-xlrd, ++ python3-xlwt, ++# for HTML table I/O ++ python3-bs4, ++ python3-html5lib, ++ python3-lxml, ++# for HDF5 I/O ++ python3-tables, ++# for styled output ++ python3-jinja2, ++Suggests: python-pandas-doc, ++ python3-statsmodels ++Breaks: ++# 0.23 -> 0.25 API breaks, #931557 ++ cnvkit (<< 0.9.6-2~), ++# 0.25 -> 1.0 API breaks, #950430 ++ python3-dask (<< 2.10.1~), ++ python3-biom-format (<< 2.1.8+dfsg-3.1~), ++ python3-feather-format (<< 0.3.1+dfsg1-4~), ++ python3-skbio (<< 0.5.5-4~), ++ python3-statsmodels (<< 0.11.0~), ++ python3-seaborn (<< 0.9.1~), ++ q2-demux (<< 2019.10.0-1.1~), ++ q2templates (<< 2019.10.0+dfsg-1.1~), ++ q2-types (<< 2019.10.0-1.1~) ++Description: data structures for "relational" or "labeled" data ++ pandas is a Python package providing fast, flexible, and expressive ++ data structures designed to make working with "relational" or ++ "labeled" data both easy and intuitive. It aims to be the fundamental ++ high-level building block for doing practical, real world data ++ analysis in Python. pandas is well suited for many different kinds of ++ data: ++ . ++ - Tabular data with heterogeneously-typed columns, as in an SQL ++ table or Excel spreadsheet ++ - Ordered and unordered (not necessarily fixed-frequency) time ++ series data. ++ - Arbitrary matrix data (homogeneously typed or heterogeneous) with ++ row and column labels ++ - Any other form of observational / statistical data sets. The data ++ actually need not be labeled at all to be placed into a pandas ++ data structure ++ . ++ This package contains the Python 3 version. ++ ++Package: python-pandas-doc ++Architecture: all ++Section: doc ++Depends: ${misc:Depends}, ++ libjs-jquery, ++ libjs-requirejs, ++ libjs-underscore, ++ libjs-mathjax ++Suggests: python3-pandas ++Description: data structures for "relational" or "labeled" data - documentation ++ pandas is a Python package providing fast, flexible, and expressive ++ data structures designed to make working with "relational" or ++ "labeled" data both easy and intuitive. It aims to be the fundamental ++ high-level building block for doing practical, real world data ++ analysis in Python. pandas is well suited for many different kinds of ++ data: ++ . ++ - Tabular data with heterogeneously-typed columns, as in an SQL ++ table or Excel spreadsheet ++ - Ordered and unordered (not necessarily fixed-frequency) time ++ series data. ++ - Arbitrary matrix data (homogeneously typed or heterogeneous) with ++ row and column labels ++ - Any other form of observational / statistical data sets. The data ++ actually need not be labeled at all to be placed into a pandas ++ data structure ++ . ++ This package contains the documentation. ++ ++Package: python3-pandas-lib ++Architecture: any ++Multi-Arch: same ++Depends: ${misc:Depends}, ${shlibs:Depends}, ${python3:Depends}, python3-numpy (>=1:1.15~) ++# this is here to allow it to be arch-specific, to avoid numba bugs on other architectures ++Recommends: python3-numba [amd64] ++Description: low-level implementations and bindings for pandas ++ This is a low-level package for python3-pandas providing ++ architecture-dependent extensions. ++ . ++ Users should not need to install it directly. 
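As a brief illustration of the "labeled" data structures described in the package description above (a minimal sketch, not part of the packaging files, assuming only python3-pandas and its python3-numpy dependency are installed):

    import pandas as pd

    # Tabular data with heterogeneously-typed columns and time-series row labels
    df = pd.DataFrame(
        {"name": ["alice", "bob"],      # text column
         "score": [9.5, 7.25],          # floating-point column
         "passed": [True, False]},      # boolean column
        index=pd.date_range("2020-08-01", periods=2, freq="D"),
    )
    print(df.dtypes)                      # per-column dtypes: object, float64, bool
    print(df.loc["2020-08-02", "score"])  # label-based access -> 7.25

The optional backends listed under Recommends (python3-tables, python3-openpyxl, python3-matplotlib, ...) are only needed for the corresponding I/O or plotting calls (to_hdf, to_excel, plot); core DataFrame operations like the above work without them.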
diff --cc debian/copyright index 00000000,00000000..42537173 new file mode 100644 --- /dev/null +++ b/debian/copyright @@@ -1,0 -1,0 +1,737 @@@ ++Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ ++Upstream-Name: pandas ++Upstream-Contact: pandas-dev@python.org ++Source: https://github.com/pandas-dev/pandas ++Files-Excluded: doc/source/user_guide/cookbook.rst ++Comment: cookbook has around 100 Stack Overflow snippets (CC-BY-SA with possibly inadequate attribution) ++ ++Files: * ++Copyright: 2008-2011 AQR Capital Management, LLC ++ 2011 Wes McKinney and pandas developers ++ 2011-2020 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++Comment: The original source contains some CC-BY-SA snippets by Stack Overflow users (https://stackoverflow.com/users/1114/jeremy-banks, https://stackoverflow.com/users/387251/oliver, https://stackoverflow.com/users/3297752/noah-motion, https://stackoverflow.com/users/925671/bill, https://stackoverflow.com/users/1082349/foobar, https://stackoverflow.com/users/3089209/crantila, https://stackoverflow.com/users/2375855/ojdo, https://stackoverflow.com/users/487339/dsm, https://stackoverflow.com/users/2677943/swenzel), but these may be too small to be copyrightable, and the less trivial ones are patched out in this package ++ ++Files: doc/source/themes/nature_with_gtoc/* ++Copyright: 2007-2011 by the Sphinx team ++License: BSD-2 ++ ++Files: doc/sphinxext/* ++Copyright: 2008, Stefan van der Walt , Pauli Virtanen ++License: BSD-2 ++ ++Files: doc/sphinxext/announce.py ++Copyright: 2001-2017 Enthought, Inc. and SciPy Developers. ++ 2017-2020 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++Comment: it is possible that other code was also taken from Scipy ++ ++Files: pandas/_libs/src/headers/portable.h ++Copyright: 2005-2014 Rich Felker and contributors ++ 2008-2019, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team ++License: Expat and BSD-3 ++Origin: musl (partly) ++Comment: this might or might not be from a public domain part of musl ++ ++Files: pandas/_libs/src/headers/ms_* ++Copyright: 2006-2008 Alexander Chemeris ++License: BSD-3 ++ ++Files: pandas/_libs/src/klib/* ++Copyright: 2008, 2009, 2011 by Attractive Chaos ++License: Expat ++ ++Files: pandas/_libs/src/parser/tokenizer.* ++Copyright: 2002 Michael Ringgaard ++ 2011-2012 Warren Weckesser ++ 2001-2012 Python Software Foundation and Python contributors ++ 2012-2020 Lambda Foundry, Inc. and PyData Development Team ++License: Python and BSD-3 ++Origin: csv (Python standard library), github.com/WarrenWeckesser/textreader ++ ++Files: pandas/_libs/src/skiplist.h ++Copyright: 2009, Raymond Hettinger ++ 2011-2019 Wes McKinney and PyData Development Team ++License: Expat and BSD-3 ++Origin: http://code.activestate.com/recipes/576930/ ++Comment: it is a Cython code "inspired" by the original Python code by Raymond ++ ++Files: pandas/_libs/src/ujson/* ++Copyright: 1988-1993 The Regents of the University of California ++ 1994 Sun Microsystems, Inc. ++ 2007 Nick Galbreath ++ 2011-2013 ESN Social Software AB and Jonas Tarnstrom ++ 2012-2020 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 and Expat ++Origin: ultrajson ++ ++Files: pandas/_libs/tslibs/parsing.pyx ++Copyright: 2003-2011 - Gustavo Niemeyer ++ 2012-2014 - Tomi Pieviläinen ++ 2014-2016 - Yaron de Leeuw ++ 2015-2017 - Paul Ganssle ++ 2015-2017 - dateutil contributors (see AUTHORS file) ++ 2008-2019, AQR Capital Management, LLC, Lambda Foundry, Inc. 
and PyData Development Team ++License: BSD-3 ++Origin: dateutil (partly) ++ ++Files: pandas/_libs/tslibs/src/datetime/* ++Copyright: 2005-2013, NumPy Developers ++License: BSD-3 ++Origin: numpy ++Comment: Listed as derived from Numpy 1.7 ++ ++Files: pandas/_libs/window/aggregations.pyx ++ pandas/tests/window/moments/test_moments_rolling.py ++Copyright: 2010-2012 Archipel Asset Management AB ++ 2011-2019 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++Origin: bottleneck (partly) ++Comment: Original was BSD-2, but BSD-2 and BSD-3 = BSD-3 ++ ++Files: pandas/compat/* ++Copyright: 2010-2013 Benjamin Peterson ++ 2012-2020 Lambda Foundry, Inc. and PyData Development Team ++License: Expat and BSD-3 ++Origin: six ++ ++Files: pandas/core/accessor.py ++Copyright: 2014-2018 xarray developers ++ 2018-2019 Lambda Foundry, Inc. and PyData Development Team ++License: Apache-2.0 and BSD-3 ++Origin: xarray (partly) ++ ++Files: pandas/io/clipboard/* ++Copyright: 2010-2017 Albert Sweigart and Pyperclip contributors ++ 2016-2019 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++Origin: Pyperclip ++ ++Files: pandas/io/sas/sas7bdat.py ++Copyright: 2015 Jared Hobbs ++ 2016-2019 Lambda Foundry, Inc. and PyData Development Team ++Origin: https://bitbucket.org/jaredhobbs/sas7bdat ++License: Expat ++ ++Files: pandas/tests/io/data/html/banklist.html ++ pandas/tests/io/data/csv/banklist.csv ++ pandas/tests/io/data/html/spam.html ++Copyright: None; by Federal Deposit Insurance Corporation and US Department of Agriculture ++License: public-domain ++ ++Files: pandas/tests/io/data/html/wikipedia_states.html ++Copyright: 2002-2014 Wikipedia contributors (full list: https://en.wikipedia.org/w/index.php?title=List_of_U.S._states_and_territories_by_area&offset=20140630&action=history) ++License: CC-BY-SA-3.0 ++ ++Files: scripts/find_commits_touching_func.py ++Copyright: 2013 y-p @ github ++ 2013-2017 Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++ ++Files: setup.py ++Copyright: 2009-2012, Brian Granger, Min Ragan-Kelley (from pyzmq) ++ 2004 Infrae (from lxml) ++ 2008-2020, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team ++License: BSD-3 ++ ++Files: debian/* ++Copyright: 2011-2018, Yaroslav Halchenko ++ 2019-2020, Debian Science Team ++License: BSD-3 ++ ++License: BSD-2 ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ . ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++License: BSD-3 ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ . ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following ++ disclaimer in the documentation and/or other materials provided ++ with the distribution. ++ * Neither the name of the copyright holder nor the names of any ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++License: Expat ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ "Software"), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ . ++ The above copyright notice and this permission notice shall be ++ included in all copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS ++ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ ++License: Apache-2.0 ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. ++ You may obtain a copy of the License at ++ . ++ http://www.apache.org/licenses/LICENSE-2.0 ++ . 
++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. ++ . ++ On Debian systems full text of the license could be found in ++ /usr/share/common-licenses/Apache-2.0 . ++ ++License: Python ++ PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 ++ -------------------------------------------- ++ . ++ 1. This LICENSE AGREEMENT is between the Python Software Foundation ++ ("PSF"), and the Individual or Organization ("Licensee") accessing and ++ otherwise using this software ("Python") in source or binary form and ++ its associated documentation. ++ . ++ 2. Subject to the terms and conditions of this License Agreement, PSF hereby ++ grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, ++ analyze, test, perform and/or display publicly, prepare derivative works, ++ distribute, and otherwise use Python alone or in any derivative version, ++ provided, however, that PSF's License Agreement and PSF's notice of copyright, ++ i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 ++ Python Software Foundation; All Rights Reserved" are retained in Python alone or ++ in any derivative version prepared by Licensee. ++ . ++ 3. In the event Licensee prepares a derivative work that is based on ++ or incorporates Python or any part thereof, and wants to make ++ the derivative work available to others as provided herein, then ++ Licensee hereby agrees to include in any such work a brief summary of ++ the changes made to Python. ++ . ++ 4. PSF is making Python available to Licensee on an "AS IS" ++ basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR ++ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND ++ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS ++ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT ++ INFRINGE ANY THIRD PARTY RIGHTS. ++ . ++ 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON ++ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS ++ A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, ++ OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. ++ . ++ 6. This License Agreement will automatically terminate upon a material ++ breach of its terms and conditions. ++ . ++ 7. Nothing in this License Agreement shall be deemed to create any ++ relationship of agency, partnership, or joint venture between PSF and ++ Licensee. This License Agreement does not grant permission to use PSF ++ trademarks or trade name in a trademark sense to endorse or promote ++ products or services of Licensee, or any third party. ++ . ++ 8. By copying, installing or otherwise using Python, Licensee ++ agrees to be bound by the terms and conditions of this License ++ Agreement. ++ ++License: public-domain ++ US federal government works ++ ++License: CC-BY-SA-3.0 ++ Creative Commons Attribution-ShareAlike 3.0 Unported ++ . ++ . ++ . ++ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS ++ LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON ++ AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, ++ AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE. ++ . 
++ License ++ . ++ . ++ . ++ THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ++ ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE ++ LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS ++ PROHIBITED. ++ . ++ BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS ++ LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE ++ RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. ++ . ++ . ++ . ++ . ++ . ++ 1. ++ Definitions ++ . ++ . ++ . ++ . ++ a. ++ "Adaptation" means a work based upon the Work, or upon the Work and other ++ pre-existing works, such as a translation, adaptation, derivative work, arrangement of ++ music or other alterations of a literary or artistic work, or phonogram or performance and ++ includes cinematographic adaptations or any other form in which the Work may be recast, ++ transformed, or adapted including in any form recognizably derived from the original, ++ except that a work that constitutes a Collection will not be considered an Adaptation for ++ the purpose of this License. For the avoidance of doubt, where the Work is a musical work, ++ performance or phonogram, the synchronization of the Work in timed-relation with a moving ++ image ("synching") will be considered an Adaptation for the purpose of this ++ License. ++ . ++ . ++ . ++ b. ++ "Collection" means a collection of literary or artistic works, such as ++ encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works ++ or subject matter other than works listed in Section 1(f) below, which, by reason of the ++ selection and arrangement of their contents, constitute intellectual creations, in which ++ the Work is included in its entirety in unmodified form along with one or more other ++ contributions, each constituting separate and independent works in themselves, which ++ together are assembled into a collective whole. A work that constitutes a Collection will ++ not be considered an Adaptation (as defined below) for the purposes of this License. ++ . ++ . ++ . ++ c. ++ "Creative Commons Compatible License" means a license that is listed at ++ http://creativecommons.org/compatiblelicenses that has been approved by Creative Commons ++ as being essentially equivalent to this License, including, at a minimum, because that ++ license: (i) contains terms that have the same purpose, meaning and effect as the License ++ Elements of this License; and, (ii) explicitly permits the relicensing of adaptations of ++ works made available under that license under this License or a Creative Commons ++ jurisdiction license with the same License Elements as this License. ++ . ++ . ++ . ++ d. ++ "Distribute" means to make available to the public the original and copies of the ++ Work or Adaptation, as appropriate, through sale or other transfer of ownership. ++ . ++ . ++ . ++ e. ++ "License Elements" means the following high-level license attributes as selected by ++ Licensor and indicated in the title of this License: Attribution, ShareAlike. ++ . ++ . ++ . ++ f. ++ "Licensor" means the individual, individuals, entity or entities that offer(s) the ++ Work under the terms of this License. ++ . ++ . ++ . ++ g. 
++ "Original Author" means, in the case of a literary or artistic work, the ++ individual, individuals, entity or entities who created the Work or if no individual or ++ entity can be identified, the publisher; and in addition (i) in the case of a performance ++ the actors, singers, musicians, dancers, and other persons who act, sing, deliver, ++ declaim, play in, interpret or otherwise perform literary or artistic works or expressions ++ of folklore; (ii) in the case of a phonogram the producer being the person or legal entity ++ who first fixes the sounds of a performance or other sounds; and, (iii) in the case of ++ broadcasts, the organization that transmits the broadcast. ++ . ++ . ++ . ++ h. ++ "Work" means the literary and/or artistic work offered under the terms of this ++ License including without limitation any production in the literary, scientific and ++ artistic domain, whatever may be the mode or form of its expression including digital ++ form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work ++ of the same nature; a dramatic or dramatico-musical work; a choreographic work or ++ entertainment in dumb show; a musical composition with or without words; a cinematographic ++ work to which are assimilated works expressed by a process analogous to cinematography; a ++ work of drawing, painting, architecture, sculpture, engraving or lithography; a ++ photographic work to which are assimilated works expressed by a process analogous to ++ photography; a work of applied art; an illustration, map, plan, sketch or ++ three-dimensional work relative to geography, topography, architecture or science; a ++ performance; a broadcast; a phonogram; a compilation of data to the extent it is protected ++ as a copyrightable work; or a work performed by a variety or circus performer to the ++ extent it is not otherwise considered a literary or artistic work. ++ . ++ . ++ . ++ i. ++ "You" means an individual or entity exercising rights under this License who has ++ not previously violated the terms of this License with respect to the Work, or who has ++ received express permission from the Licensor to exercise rights under this License ++ despite a previous violation. ++ . ++ . ++ . ++ j. ++ "Publicly Perform" means to perform public recitations of the Work and to ++ communicate to the public those public recitations, by any means or process, including by ++ wire or wireless means or public digital performances; to make available to the public ++ Works in such a way that members of the public may access these Works from a place and at ++ a place individually chosen by them; to perform the Work to the public by any means or ++ process and the communication to the public of the performances of the Work, including by ++ public digital performance; to broadcast and rebroadcast the Work by any means including ++ signs, sounds or images. ++ . ++ . ++ . ++ k. ++ "Reproduce" means to make copies of the Work by any means including without ++ limitation by sound or visual recordings and the right of fixation and reproducing ++ fixations of the Work, including storage of a protected performance or phonogram in ++ digital form or other electronic medium. ++ . ++ . ++ . ++ . ++ . ++ 2. ++ Fair Dealing Rights. Nothing in this License is intended to reduce, limit, or restrict any uses ++ free from copyright or rights arising from limitations or exceptions that are provided for in ++ connection with the copyright protection under copyright law or other applicable laws. ++ . ++ . 
++ . ++ 3. ++ License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a ++ worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable ++ copyright) license to exercise the rights in the Work as stated below: ++ . ++ . ++ . ++ . ++ a. ++ to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce ++ the Work as incorporated in the Collections; ++ . ++ . ++ . ++ b. ++ to create and Reproduce Adaptations provided that any such Adaptation, including any ++ translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise ++ identify that changes were made to the original Work. For example, a translation could be ++ marked "The original work was translated from English to Spanish," or a ++ modification could indicate "The original work has been modified."; ++ . ++ . ++ . ++ c. ++ to Distribute and Publicly Perform the Work including as incorporated in Collections; and, ++ . ++ . ++ . ++ d. ++ to Distribute and Publicly Perform Adaptations. ++ . ++ . ++ . ++ e. ++ For the avoidance of doubt: ++ . ++ . ++ . ++ . ++ i. ++ Non-waivable Compulsory License Schemes. In those jurisdictions in which the right to ++ collect royalties through any statutory or compulsory licensing scheme cannot be ++ waived, the Licensor reserves the exclusive right to collect such royalties for any ++ exercise by You of the rights granted under this License; ++ . ++ . ++ . ++ ii. ++ Waivable Compulsory License Schemes. In those jurisdictions in which the right to collect ++ royalties through any statutory or compulsory licensing scheme can be waived, the ++ Licensor waives the exclusive right to collect such royalties for any exercise by You ++ of the rights granted under this License; and, ++ . ++ . ++ . ++ iii. ++ Voluntary License Schemes. The Licensor waives the right to collect royalties, whether ++ individually or, in the event that the Licensor is a member of a collecting society ++ that administers voluntary licensing schemes, via that society, from any exercise by ++ You of the rights granted under this License. ++ . ++ . ++ . ++ . ++ The above rights may be exercised in all media and formats whether now known or hereafter ++ devised. The above rights include the right to make such modifications as are ++ technically necessary to exercise the rights in other media and formats. Subject to ++ Section 8(f), all rights not expressly granted by Licensor are hereby reserved. ++ . ++ . ++ . ++ . ++ 4. ++ Restrictions. The license granted in Section 3 above is expressly made subject to and limited by ++ the following restrictions: ++ . ++ . ++ . ++ . ++ a. ++ You may Distribute or Publicly Perform the Work only under the terms of this License. You ++ must include a copy of, or the Uniform Resource Identifier (URI) for, this License with ++ every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any ++ terms on the Work that restrict the terms of this License or the ability of the recipient ++ of the Work to exercise the rights granted to that recipient under the terms of the ++ License. You may not sublicense the Work. You must keep intact all notices that refer to ++ this License and to the disclaimer of warranties with every copy of the Work You ++ Distribute or Publicly Perform. 
When You Distribute or Publicly Perform the Work, You may ++ not impose any effective technological measures on the Work that restrict the ability of a ++ recipient of the Work from You to exercise the rights granted to that recipient under the ++ terms of the License. This Section 4(a) applies to the Work as incorporated in a ++ Collection, but this does not require the Collection apart from the Work itself to be made ++ subject to the terms of this License. If You create a Collection, upon notice from any ++ Licensor You must, to the extent practicable, remove from the Collection any credit as ++ required by Section 4(c), as requested. If You create an Adaptation, upon notice from any ++ Licensor You must, to the extent practicable, remove from the Adaptation any credit as ++ required by Section 4(c), as requested. ++ . ++ . ++ . ++ b. ++ You may Distribute or Publicly Perform an Adaptation only under the terms of: (i) this ++ License; (ii) a later version of this License with the same License Elements as this ++ License; (iii) a Creative Commons jurisdiction license (either this or a later license ++ version) that contains the same License Elements as this License (e.g., ++ Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible License. If you ++ license the Adaptation under one of the licenses mentioned in (iv), you must comply with ++ the terms of that license. If you license the Adaptation under the terms of any of the ++ licenses mentioned in (i), (ii) or (iii) (the "Applicable License"), you must ++ comply with the terms of the Applicable License generally and the following provisions: ++ (I) You must include a copy of, or the URI for, the Applicable License with every copy of ++ each Adaptation You Distribute or Publicly Perform; (II) You may not offer or impose any ++ terms on the Adaptation that restrict the terms of the Applicable License or the ability ++ of the recipient of the Adaptation to exercise the rights granted to that recipient under ++ the terms of the Applicable License; (III) You must keep intact all notices that refer to ++ the Applicable License and to the disclaimer of warranties with every copy of the Work as ++ included in the Adaptation You Distribute or Publicly Perform; (IV) when You Distribute or ++ Publicly Perform the Adaptation, You may not impose any effective technological measures ++ on the Adaptation that restrict the ability of a recipient of the Adaptation from You to ++ exercise the rights granted to that recipient under the terms of the Applicable License. ++ This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does ++ not require the Collection apart from the Adaptation itself to be made subject to the ++ terms of the Applicable License. ++ . ++ . ++ . ++ c. 
++ If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, ++ unless a request has been made pursuant to Section 4(a), keep intact all copyright notices ++ for the Work and provide, reasonable to the medium or means You are utilizing: (i) the ++ name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the ++ Original Author and/or Licensor designate another party or parties (e.g., a sponsor ++ institute, publishing entity, journal) for attribution ("Attribution Parties") ++ in Licensor's copyright notice, terms of service or by other reasonable means, the ++ name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent ++ reasonably practicable, the URI, if any, that Licensor specifies to be associated with the ++ Work, unless such URI does not refer to the copyright notice or licensing information for ++ the Work; and (iv), consistent with ++ Section ++ 3(b), in the case of an Adaptation, a credit ++ identifying the use of the Work in the Adaptation (e.g., "French translation of the ++ Work by Original Author," or "Screenplay based on original Work by Original ++ Author"). The credit required by this Section 4(c) may be implemented in any ++ reasonable manner; provided, however, that in the case of a Adaptation or Collection, at a ++ minimum such credit will appear, if a credit for all contributing authors of the ++ Adaptation or Collection appears, then as part of these credits and in a manner at least ++ as prominent as the credits for the other contributing authors. For the avoidance of ++ doubt, You may only use the credit required by this Section for the purpose of attribution ++ in the manner set out above and, by exercising Your rights under this License, You may not ++ implicitly or explicitly assert or imply any connection with, sponsorship or endorsement ++ by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or ++ Your use of the Work, without the separate, express prior written permission of the ++ Original Author, Licensor and/or Attribution Parties. ++ . ++ . ++ . ++ d. ++ Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by ++ applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself ++ or as part of any Adaptations or Collections, You must not distort, mutilate, modify or ++ take other derogatory action in relation to the Work which would be prejudicial to the ++ Original Author's honor or reputation. Licensor agrees that in those jurisdictions ++ (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License ++ (the right to make Adaptations) would be deemed to be a distortion, mutilation, ++ modification or other derogatory action prejudicial to the Original Author's honor ++ and reputation, the Licensor will waive or not assert, as appropriate, this Section, to ++ the fullest extent permitted by the applicable national law, to enable You to reasonably ++ exercise Your right under Section 3(b) of this License (right to make Adaptations) but not ++ otherwise. ++ . ++ . ++ . ++ . ++ . ++ 5. 
++ Representations, Warranties and Disclaimer ++ UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND ++ MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, ++ STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, ++ FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ++ ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME ++ JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT ++ APPLY TO YOU. ++ . ++ . ++ . ++ . ++ 6. ++ Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL ++ LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, ++ PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF ++ LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. ++ . ++ . ++ . ++ 7. ++ Termination ++ . ++ . ++ . ++ . ++ a. ++ This License and the rights granted hereunder will terminate automatically upon any breach by ++ You of the terms of this License. Individuals or entities who have received Adaptations or ++ Collections from You under this License, however, will not have their licenses terminated ++ provided such individuals or entities remain in full compliance with those licenses. ++ Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. ++ . ++ . ++ . ++ b. ++ Subject to the above terms and conditions, the license granted here is perpetual (for the ++ duration of the applicable copyright in the Work). Notwithstanding the above, Licensor ++ reserves the right to release the Work under different license terms or to stop ++ distributing the Work at any time; provided, however that any such election will not serve ++ to withdraw this License (or any other license that has been, or is required to be, ++ granted under the terms of this License), and this License will continue in full force and ++ effect unless terminated as stated above. ++ . ++ . ++ . ++ . ++ . ++ 8. ++ Miscellaneous ++ . ++ . ++ . ++ . ++ a. ++ Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to ++ the recipient a license to the Work on the same terms and conditions as the license ++ granted to You under this License. ++ . ++ . ++ . ++ b. ++ Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient ++ a license to the original Work on the same terms and conditions as the license granted to ++ You under this License. ++ . ++ . ++ . ++ c. ++ If any provision of this License is invalid or unenforceable under applicable law, it shall ++ not affect the validity or enforceability of the remainder of the terms of this License, ++ and without further action by the parties to this agreement, such provision shall be ++ reformed to the minimum extent necessary to make such provision valid and enforceable. ++ . ++ . ++ . ++ d. ++ No term or provision of this License shall be deemed waived and no breach consented to unless ++ such waiver or consent shall be in writing and signed by the party to be charged with such ++ waiver or consent. ++ . ++ . ++ . ++ e. ++ This License constitutes the entire agreement between the parties with respect to the Work ++ licensed here. There are no understandings, agreements or representations with respect to ++ the Work not specified here. 
Licensor shall not be bound by any additional provisions that ++ may appear in any communication from You. This License may not be modified without the ++ mutual written agreement of the Licensor and You. ++ . ++ . ++ . ++ f. ++ The rights granted under, and the subject matter referenced, in this License were drafted ++ utilizing the terminology of the Berne Convention for the Protection of Literary and ++ Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO ++ Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the ++ Universal Copyright Convention (as revised on July 24, 1971). These rights and subject ++ matter take effect in the relevant jurisdiction in which the License terms are sought to ++ be enforced according to the corresponding provisions of the implementation of those ++ treaty provisions in the applicable national law. If the standard suite of rights granted ++ under applicable copyright law includes additional rights not granted under this License, ++ such additional rights are deemed to be included in the License; this License is not ++ intended to restrict the license of any rights under applicable law. ++ . ++ . ++ . ++ . ++ Creative Commons Notice ++ . ++ Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the ++ Work. Creative Commons will not be liable to You or any party on any legal theory for any damages ++ whatsoever, including without limitation any general, special, incidental or consequential damages ++ arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative ++ Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and ++ obligations of Licensor. ++ . ++ Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, ++ Creative Commons does not authorize the use by either party of the trademark "Creative ++ Commons" or any related trademark or logo of Creative Commons without the prior written consent ++ of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current ++ trademark usage guidelines, as may be published on its website or otherwise made available upon ++ request from time to time. For the avoidance of doubt, this trademark restriction does not form part ++ of the License. ++ . ++ Creative Commons may be contacted at http://creativecommons.org/. diff --cc debian/gbp.conf index 00000000,00000000..9688b7a8 new file mode 100644 --- /dev/null +++ b/debian/gbp.conf @@@ -1,0 -1,0 +1,10 @@@ ++[DEFAULT] ++# the default branch for upstream sources: ++upstream-branch = master ++# the default branch for the debian patch: ++debian-branch = debian ++# the default tag formats used: ++upstream-tag = v%(version)s ++debian-tag = debian/%(version)s ++ ++ diff --cc debian/patches/968208_rounding.patch index 00000000,00000000..7a9c14e9 new file mode 100644 --- /dev/null +++ b/debian/patches/968208_rounding.patch @@@ -1,0 -1,0 +1,29 @@@ ++Description: Don't fail test for rounding difference on i386 ++ ++Author: Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/968208 ++Forwarded: no ++ ++--- a/pandas/tests/window/moments/test_moments_rolling.py +++++ b/pandas/tests/window/moments/test_moments_rolling.py ++@@ -1,6 +1,9 @@ ++ import copy ++ from datetime import datetime ++ import warnings +++import sys +++import platform +++import re ++ ++ import numpy as np ++ from numpy.random import randn ++@@ -637,7 +640,9 @@ class TestMoments(Base): ++ if np.isnan(q1): ++ assert np.isnan(q2) ++ else: ++- assert q1 == q2 +++ assert np.abs(q1-q2)<1e-15 +++ if not (re.match('i.?86|x86',platform.uname()[4]) and sys.maxsize<2**33): +++ assert q1 == q2 ++ ++ def test_invalid_quantile_value(self): ++ data = np.arange(5) diff --cc debian/patches/contributor_list_not_in_tarball.patch index 00000000,00000000..460313b8 new file mode 100644 --- /dev/null +++ b/debian/patches/contributor_list_not_in_tarball.patch @@@ -1,0 -1,0 +1,28 @@@ ++Description: Don't try to read a contributor list from the git log ++ ++Debian packages are built from tarballs, so there isn't a git log. ++ ++Author: Rebecca N. Palmer ++Forwarded: not-needed ++ ++--- a/doc/sphinxext/contributors.py +++++ b/doc/sphinxext/contributors.py ++@@ -14,10 +14,8 @@ use:: ++ While the v0.23.1 tag does not exist, that will use the HEAD of the ++ branch as the end of the revision range. ++ """ ++-from announce import build_components ++ from docutils import nodes ++ from docutils.parsers.rst import Directive ++-import git ++ ++ ++ class ContributorsDirective(Directive): ++@@ -25,6 +23,7 @@ class ContributorsDirective(Directive): ++ name = "contributors" ++ ++ def run(self): +++ return [nodes.paragraph(), nodes.Text("For contributors, please see /usr/share/doc/contributors_list.txt or https://github.com/pandas-dev/pandas/graphs/contributors")] ++ range_ = self.arguments[0] ++ if range_.endswith("x..HEAD"): ++ return [nodes.paragraph(), nodes.bullet_list()] diff --cc debian/patches/deb_disable_googleanalytics.patch index 00000000,00000000..8604300f new file mode 100644 --- /dev/null +++ b/debian/patches/deb_disable_googleanalytics.patch @@@ -1,0 -1,0 +1,82 @@@ ++Author: Yaroslav Halchenko ++ Andreas Tille ++ Rebecca N. Palmer ++Last-Update: Mon, 23 Oct 2017 08:55:28 +0200 ++Description: Avoid privacy breach by Google Analytics ++Forwarded: not-needed ++ ++--- a/pandas/tests/io/data/html/spam.html +++++ b/pandas/tests/io/data/html/spam.html ++@@ -27,45 +27,9 @@ ++ ++ ++ ++- ++- +++ ++ ++ ++ ++@@ -794,4 +758,4 @@ handler: function() {this.cancel();}, ++   Software v.1.2.2 ++ ++ ++- ++\ No newline at end of file +++ ++--- a/doc/source/themes/nature_with_gtoc/layout.html +++++ b/doc/source/themes/nature_with_gtoc/layout.html ++@@ -95,14 +95,4 @@ $(document).ready(function() { ++ }); ++ ++ ++- ++- ++- ++- ++- ++ {% endblock %} diff --cc debian/patches/deb_doc_donotoverride_PYTHONPATH.patch index 00000000,00000000..dc39d226 new file mode 100644 --- /dev/null +++ b/debian/patches/deb_doc_donotoverride_PYTHONPATH.patch @@@ -1,0 -1,0 +1,21 @@@ ++Description: Don't try to import from the source directory ++ ++Needed as we build the extension modules elsewhere ++ ++Author: Yaroslav Halchenko ++Forwarded: not-needed ++ ++--- a/doc/make.py +++++ b/doc/make.py ++@@ -339,8 +339,9 @@ def main(): ++ # external libraries (namely Sphinx) to compile this module and resolve ++ # the import of `python_path` correctly. 
The latter is used to resolve ++ # the import within the module, injecting it into the global namespace ++- os.environ["PYTHONPATH"] = args.python_path ++- sys.path.insert(0, args.python_path) +++ # Debian: we set it outside +++ #os.environ["PYTHONPATH"] = args.python_path +++ #sys.path.insert(0, args.python_path) ++ globals()["pandas"] = importlib.import_module("pandas") ++ ++ # Set the matplotlib backend to the non-interactive Agg backend for all diff --cc debian/patches/deb_no_strict_data.patch index 00000000,00000000..5baa33d4 new file mode 100644 --- /dev/null +++ b/debian/patches/deb_no_strict_data.patch @@@ -1,0 -1,0 +1,19 @@@ ++Description: Don't fail tests because we don't ship test data ++ ++This applies to users running them; our build/autopkgtest re-enable ++this and use the data from the source tree ++ ++Author: Yaroslav Halchenko ++Forwarded: no ++ ++--- a/setup.cfg +++++ b/setup.cfg ++@@ -61,7 +61,7 @@ markers = ++ high_memory: mark a test as a high-memory only ++ clipboard: mark a pd.read_clipboard test ++ doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL ++-addopts = --strict-data-files +++# addopts = --strict-data-files ++ xfail_strict = True ++ filterwarnings = ++ error:Sparse:FutureWarning diff --cc debian/patches/deb_nonversioneer_version.patch index 00000000,00000000..c44ea0e7 new file mode 100644 --- /dev/null +++ b/debian/patches/deb_nonversioneer_version.patch @@@ -1,0 -1,0 +1,46 @@@ ++Description: Don't try to use git to find the version number ++ ++Needed as Debian buildds use tarballs ++ ++Author: Yaroslav Halchenko ++Forwarded: not-needed ++ ++--- a/pandas/__init__.py +++++ b/pandas/__init__.py ++@@ -180,13 +180,7 @@ from pandas.util._tester import test ++ import pandas.testing ++ import pandas.arrays ++ ++-# use the closest tagged version if possible ++-from ._version import get_versions ++- ++-v = get_versions() ++-__version__ = v.get("closest-tag", v["version"]) ++-__git_version__ = v.get("full-revisionid") ++-del get_versions, v +++from .__version import version as __version__ ++ ++ # GH 27101 ++ # TODO: remove Panel compat in 1.0 ++--- a/pandas/tests/api/test_api.py +++++ b/pandas/tests/api/test_api.py ++@@ -202,7 +202,7 @@ class TestPDApi(Base): ++ "_testing", ++ "_tslib", ++ "_typing", ++- "_version", +++# not in Debian "_version", ++ ] ++ ++ def test_api(self): ++--- a/pandas/tests/test_common.py +++++ b/pandas/tests/test_common.py ++@@ -113,7 +113,7 @@ def test_standardize_mapping(): ++ dd = collections.defaultdict(list) ++ assert isinstance(com.standardize_mapping(dd), partial) ++ ++- +++@pytest.mark.xfail(reason="deb_nonversioneer_version patch") ++ def test_git_version(): ++ # GH 21295 ++ git_version = pd.__git_version__ diff --cc debian/patches/disable_pytest_asyncio.patch index 00000000,00000000..05332eff new file mode 100644 --- /dev/null +++ b/debian/patches/disable_pytest_asyncio.patch @@@ -1,0 -1,0 +1,23 @@@ ++Description: Disable pytest_asyncio ++ ++Debian currently has incompatible versions of pytest_asyncio and ++pytest, causing these tests to error on collection. ++ ++Author: Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/969050 ++Forwarded: not-needed https://github.com/pandas-dev/pandas/pull/35757 ++ ++--- pandas-1.0.5+dfsg.orig/pandas/util/_test_decorators.py +++++ pandas-1.0.5+dfsg/pandas/util/_test_decorators.py ++@@ -255,10 +255,6 @@ def check_file_leaks(func) -> Callable: ++ ++ ++ def async_mark(): ++- try: ++- import_optional_dependency("pytest_asyncio") ++- async_mark = pytest.mark.asyncio ++- except ImportError: ++- async_mark = pytest.mark.skip(reason="Missing dependency pytest-asyncio") +++ async_mark = pytest.mark.skip(reason="https://bugs.debian.org/969050") ++ ++ return async_mark diff --cc debian/patches/find_test_data.patch index 00000000,00000000..6656fc08 new file mode 100644 --- /dev/null +++ b/debian/patches/find_test_data.patch @@@ -1,0 -1,0 +1,56 @@@ ++Description: Allow tests to use the data files in the source tree ++ ++We don't ship these in the package, ++but do want to run the tests that use them ++ ++Author: Rebecca N. Palmer ++Forwarded: not-needed ++ ++--- a/pandas/conftest.py +++++ b/pandas/conftest.py ++@@ -3,6 +3,7 @@ from datetime import date, time, timedel ++ from decimal import Decimal ++ import operator ++ import os +++import argparse ++ ++ from dateutil.tz import tzlocal, tzutc ++ import hypothesis ++@@ -44,6 +45,7 @@ def pytest_addoption(parser): ++ action="store_true", ++ help="Fail if a test is skipped for missing data file.", ++ ) +++ parser.addoption("--deb-data-root-dir",action="store",help=argparse.SUPPRESS)#for internal use of the Debian CI infrastructure, may change without warning. Security note: test_pickle can run arbitrary code from this directory ++ ++ ++ def pytest_runtest_setup(item): ++@@ -362,7 +364,7 @@ def strict_data_files(pytestconfig): ++ ++ ++ @pytest.fixture ++-def datapath(strict_data_files): +++def datapath(strict_data_files,pytestconfig): ++ """ ++ Get the path to a data file. ++ ++@@ -380,7 +382,9 @@ def datapath(strict_data_files): ++ ValueError ++ If the path doesn't exist and the --strict-data-files option is set. ++ """ ++- BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") +++ BASE_PATH = pytestconfig.getoption("--deb-data-root-dir",default=None) +++ if BASE_PATH is None: +++ BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") ++ ++ def deco(*args): ++ path = os.path.join(BASE_PATH, *args) ++--- a/pandas/tests/util/test_util.py +++++ b/pandas/tests/util/test_util.py ++@@ -57,6 +57,7 @@ def test_datapath_missing(datapath): ++ datapath("not_a_file") ++ ++ +++@pytest.mark.xfail(reason="--deb-data-root-dir intentionally breaks this",strict=False) ++ def test_datapath(datapath): ++ args = ("io", "data", "csv", "iris.csv") ++ diff --cc debian/patches/fix_is_warnings.patch index 00000000,00000000..ecef9fda new file mode 100644 --- /dev/null +++ b/debian/patches/fix_is_warnings.patch @@@ -1,0 -1,0 +1,29 @@@ ++Description: Don't use 'is' with literals ++ ++It causes a SyntaxWarning on install, and possibly undefined test results ++ ++Author: Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/956021 ++Forwarded: https://github.com/pandas-dev/pandas/pull/33322 ++ ++--- a/pandas/tests/frame/test_alter_axes.py +++++ b/pandas/tests/frame/test_alter_axes.py ++@@ -236,9 +236,16 @@ class TestDataFrameAlterAxes: ++ ++ # need to adapt first drop for case that both keys are 'A' -- ++ # cannot drop the same column twice; ++- # use "is" because == would give ambiguous Boolean error for containers +++ # plain == would give ambiguous Boolean error for containers ++ first_drop = ( ++- False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632 +++ False +++ if ( +++ type(keys[0]) == str +++ and keys[0] == "A" +++ and type(keys[1]) == str +++ and keys[1] == "A" +++ ) +++ else drop ++ ) ++ # to test against already-tested behaviour, we add sequentially, ++ # hence second append always True; must wrap keys in list, otherwise diff --cc debian/patches/fix_random_seeds.patch index 00000000,00000000..c07ee1e7 new file mode 100644 --- /dev/null +++ b/debian/patches/fix_random_seeds.patch @@@ -1,0 -1,0 +1,35 @@@ ++Description: Use fixed seeds for reproducible pseudorandomness ++ ++Author: Rebecca N. Palmer ++Forwarded: no ++ ++--- a/doc/source/getting_started/comparison/comparison_with_r.rst +++++ b/doc/source/getting_started/comparison/comparison_with_r.rst ++@@ -226,6 +226,7 @@ In ``pandas`` we may use :meth:`~pandas. ++ ++ import random ++ import string +++ random.seed(123456) # for reproducibility ++ ++ baseball = pd.DataFrame( ++ {'team': ["team %d" % (x + 1) for x in range(5)] * 5, ++--- a/doc/source/user_guide/advanced.rst +++++ b/doc/source/user_guide/advanced.rst ++@@ -584,6 +584,7 @@ they need to be sorted. As with any inde ++ .. ipython:: python ++ ++ import random +++ random.seed(123456) # for reproducibility ++ random.shuffle(tuples) ++ s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples)) ++ s ++--- a/doc/source/user_guide/visualization.rst +++++ b/doc/source/user_guide/visualization.rst ++@@ -992,6 +992,7 @@ are what constitutes the bootstrap plot. ++ :suppress: ++ ++ np.random.seed(123456) +++ random.seed(123456) # for reproducibility - bootstrap_plot uses random.sample ++ ++ .. ipython:: python ++ diff --cc debian/patches/fix_shebangs.patch index 00000000,00000000..8741c4d1 new file mode 100644 --- /dev/null +++ b/debian/patches/fix_shebangs.patch @@@ -1,0 -1,0 +1,101 @@@ ++Description: Use Python 3 shebangs and subprocess calls ++ ++Author: Rebecca N. Palmer ++Forwarded: accepted for 1.1 https://github.com/pandas-dev/pandas/pull/31147 ++ ++--- a/ci/print_skipped.py +++++ b/ci/print_skipped.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ import os ++ import xml.etree.ElementTree as et ++ ++--- a/doc/make.py +++++ b/doc/make.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ """ ++ Python script for building documentation. ++ ++--- a/pandas/core/computation/eval.py +++++ b/pandas/core/computation/eval.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ ++ """ ++ Top level ``eval`` module. 
++--- a/pandas/tests/io/generate_legacy_storage_files.py +++++ b/pandas/tests/io/generate_legacy_storage_files.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ ++ """ ++ self-contained to write legacy storage pickle files ++--- a/pandas/tests/plotting/common.py +++++ b/pandas/tests/plotting/common.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ # coding: utf-8 ++ ++ import os ++--- a/scripts/download_wheels.py +++++ b/scripts/download_wheels.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ """Fetch wheels from wheels.scipy.org for a pandas version.""" ++ import argparse ++ import pathlib ++--- a/scripts/validate_docstrings.py +++++ b/scripts/validate_docstrings.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ """ ++ Analyze docstrings to detect errors. ++ ++--- a/setup.py +++++ b/setup.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ ++ """ ++ Parts of this file were taken from the pyzmq project ++--- a/doc/sphinxext/announce.py +++++ b/doc/sphinxext/announce.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ # -*- encoding:utf-8 -*- ++ """ ++ Script to generate contributor and pull request lists ++--- a/scripts/generate_pip_deps_from_conda.py +++++ b/scripts/generate_pip_deps_from_conda.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ """ ++ Convert the conda environment.yml to the pip requirements-dev.txt, ++ or check that they have the same packages (for the CI) ++--- a/scripts/find_commits_touching_func.py +++++ b/scripts/find_commits_touching_func.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ # copyright 2013, y-p @ github ++ """ ++ Search the git history for all commits touching a named method ++--- a/scripts/validate_string_concatenation.py +++++ b/scripts/validate_string_concatenation.py ++@@ -1,4 +1,4 @@ ++-#!/usr/bin/env python +++#!/usr/bin/env python3 ++ """ ++ GH #30454 ++ diff --cc debian/patches/hurd_compat.patch index 00000000,00000000..872a0b7b new file mode 100644 --- /dev/null +++ b/debian/patches/hurd_compat.patch @@@ -1,0 -1,0 +1,56 @@@ ++Description: Avoid test failures on Hurd ++ ++Allow multiprocessing to be unavailable ++Accept any errno not just 2 for (intentionally) nonexistent files ++(Hurd appears to use 2**30+2) ++ ++Author: Rebecca N. 
Palmer ++Forwarded: no ++ ++--- pandas-1.0.5+dfsg.orig/pandas/tests/io/parser/test_common.py +++++ pandas-1.0.5+dfsg/pandas/tests/io/parser/test_common.py ++@@ -960,7 +960,7 @@ def test_nonexistent_path(all_parsers): ++ parser = all_parsers ++ path = "{}.csv".format(tm.rands(10)) ++ ++- msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]" +++ msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]|\[Errno [0-9]+\] No such file or directory" ++ with pytest.raises(FileNotFoundError, match=msg) as e: ++ parser.read_csv(path) ++ ++--- pandas-1.0.5+dfsg.orig/pandas/tests/io/parser/test_multi_thread.py +++++ pandas-1.0.5+dfsg/pandas/tests/io/parser/test_multi_thread.py ++@@ -3,7 +3,12 @@ Tests multithreading behaviour for readi ++ parsing files for each parser defined in parsers.py ++ """ ++ from io import BytesIO ++-from multiprocessing.pool import ThreadPool +++import pytest +++try: +++ from multiprocessing.pool import ThreadPool +++ ThreadPool() +++except ImportError: +++ pytest.skip(reason="multiprocessing not available",allow_module_level=True) ++ ++ import numpy as np ++ ++--- pandas-1.0.5+dfsg.orig/pandas/tests/io/test_common.py +++++ pandas-1.0.5+dfsg/pandas/tests/io/test_common.py ++@@ -142,7 +142,7 @@ bar2,12,13,14,15 ++ ++ path = os.path.join(HERE, "data", "does_not_exist." + fn_ext) ++ msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext) ++- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" +++ msg2 = fr"\[Errno [0-9]+\] No such file or directory: '.+does_not_exist\.{fn_ext}'" ++ msg3 = "Expected object or value" ++ msg4 = "path_or_buf needs to be a string file path or file-like" ++ msg5 = ( ++@@ -182,7 +182,7 @@ bar2,12,13,14,15 ++ monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x)) ++ ++ msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist" ++- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" +++ msg2 = fr"\[Errno [0-9]+\] No such file or directory: '.+does_not_exist\.{fn_ext}'" ++ msg3 = "Unexpected character found when decoding 'false'" ++ msg4 = "path_or_buf needs to be a string file path or file-like" ++ msg5 = ( diff --cc debian/patches/link_security.patch index 00000000,00000000..907d8aad new file mode 100644 --- /dev/null +++ b/debian/patches/link_security.patch @@@ -1,0 -1,0 +1,1310 @@@ ++Description: Use https for links where possible ++ ++Also fix some (semi-)broken links found while checking https availability. ++ ++Author: Rebecca N. Palmer ++Forwarded: accepted for 1.1 https://github.com/pandas-dev/pandas/pull/31145 ++ ++--- a/.github/CODE_OF_CONDUCT.md +++++ b/.github/CODE_OF_CONDUCT.md ++@@ -54,10 +54,10 @@ incident. ++ ++ This Code of Conduct is adapted from the [Contributor Covenant][homepage], ++ version 1.3.0, available at ++-[http://contributor-covenant.org/version/1/3/0/][version], +++[https://www.contributor-covenant.org/version/1/3/0/][version], ++ and the [Swift Code of Conduct][swift]. 
++ ++-[homepage]: http://contributor-covenant.org ++-[version]: http://contributor-covenant.org/version/1/3/0/ +++[homepage]: https://www.contributor-covenant.org +++[version]: https://www.contributor-covenant.org/version/1/3/0/ ++ [swift]: https://swift.org/community/#code-of-conduct ++ ++--- a/.github/CONTRIBUTING.md +++++ b/.github/CONTRIBUTING.md ++@@ -16,7 +16,7 @@ If you notice a bug in the code or docum ++ ++ ## Contributing to the Codebase ++ ++-The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](http://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#working-with-the-code)" section. +++The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](https://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#working-with-the-code)" section. ++ ++ Before submitting your changes for review, make sure to check that your changes do not break any tests. You can find more information about our test suites in the "[Test-driven development/code writing](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#test-driven-development-code-writing)" section. We also have guidelines regarding coding style that will be enforced during testing, which can be found in the "[Code standards](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#code-standards)" section. ++ ++--- a/AUTHORS.md +++++ b/AUTHORS.md ++@@ -14,7 +14,7 @@ About the Copyright Holders ++ The PyData Development Team is the collection of developers of the PyData ++ project. This includes all of the PyData sub-projects, including pandas. The ++ core team that coordinates development on GitHub can be found here: ++- http://github.com/pydata. +++ https://github.com/pydata. ++ ++ Full credits for pandas contributors can be found in the documentation. ++ ++--- a/RELEASE.md +++++ b/RELEASE.md ++@@ -3,4 +3,4 @@ Release Notes ++ ++ The list of changes to Pandas between each release can be found ++ [here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html). For full ++-details, see the commit logs at http://github.com/pandas-dev/pandas. +++details, see the commit logs at https://github.com/pandas-dev/pandas. ++--- a/asv_bench/benchmarks/pandas_vb_common.py +++++ b/asv_bench/benchmarks/pandas_vb_common.py ++@@ -56,7 +56,7 @@ except AttributeError: ++ def setup(*args, **kwargs): ++ # This function just needs to be imported into each benchmark file to ++ # set up the random seed before each function. 
++- # http://asv.readthedocs.io/en/latest/writing_benchmarks.html +++ # https://asv.readthedocs.io/en/latest/writing_benchmarks.html ++ np.random.seed(1234) ++ ++ ++--- a/doc/cheatsheet/README.txt +++++ b/doc/cheatsheet/README.txt ++@@ -5,4 +5,4 @@ and pick "PDF" as the format. ++ This cheat sheet was inspired by the RStudio Data Wrangling Cheatsheet[1], written by Irv Lustig, Princeton Consultants[2]. ++ ++ [1]: https://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf ++-[2]: http://www.princetonoptimization.com/ +++[2]: https://www.princetonoptimization.com/ ++--- a/doc/source/conf.py +++++ b/doc/source/conf.py ++@@ -632,10 +632,10 @@ def linkcode_resolve(domain, info): ++ fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__)) ++ ++ if "+" in pandas.__version__: ++- return f"http://github.com/pandas-dev/pandas/blob/master/pandas/{fn}{linespec}" +++ return f"https://github.com/pandas-dev/pandas/blob/master/pandas/{fn}{linespec}" ++ else: ++ return ( ++- f"http://github.com/pandas-dev/pandas/blob/" +++ f"https://github.com/pandas-dev/pandas/blob/" ++ f"v{pandas.__version__}/pandas/{fn}{linespec}" ++ ) ++ ++@@ -702,7 +702,7 @@ def rstjinja(app, docname, source): ++ """ ++ Render our pages as a jinja template for fancy templating goodness. ++ """ ++- # http://ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/ +++ # https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/ ++ # Make sure we're outputting HTML ++ if app.builder.format != "html": ++ return ++--- a/doc/source/development/contributing.rst +++++ b/doc/source/development/contributing.rst ++@@ -56,7 +56,7 @@ Bug reports and enhancement requests ++ Bug reports are an important part of making *pandas* more stable. Having a complete bug report ++ will allow others to reproduce the bug and provide insight into fixing. See ++ `this stackoverflow article `_ and ++-`this blogpost `_ +++`this blogpost `_ ++ for tips on writing a good bug report. ++ ++ Trying the bug-producing code out on the *master* branch is often a worthwhile exercise ++@@ -67,7 +67,7 @@ Bug reports must: ++ ++ #. Include a short, self-contained Python snippet reproducing the problem. ++ You can format the code nicely by using `GitHub Flavored Markdown ++- `_:: +++ `_:: ++ ++ ```python ++ >>> from pandas import DataFrame ++@@ -104,19 +104,19 @@ feel free to ask for help. ++ ++ The code is hosted on `GitHub `_. To ++ contribute you will need to sign up for a `free GitHub account ++-`_. We use `Git `_ for +++`_. We use `Git `_ for ++ version control to allow many people to work together on the project. ++ ++ Some great resources for learning Git: ++ ++-* the `GitHub help pages `_. ++-* the `NumPy's documentation `_. ++-* Matthew Brett's `Pydagogue `_. +++* the `GitHub help pages `_. +++* the `NumPy's documentation `_. +++* Matthew Brett's `Pydagogue `_. ++ ++ Getting started with Git ++ ------------------------ ++ ++-`GitHub has instructions `__ for installing git, +++`GitHub has instructions `__ for installing git, ++ setting up your SSH key, and configuring git. All these steps need to be completed before ++ you can work seamlessly between your local repository and GitHub. ++ ++@@ -249,7 +249,7 @@ To return to your root environment:: ++ ++ conda deactivate ++ ++-See the full conda docs `here `__. +++See the full conda docs `here `__. ++ ++ .. 
_contributing.pip: ++ ++@@ -354,7 +354,7 @@ About the *pandas* documentation ++ -------------------------------- ++ ++ The documentation is written in **reStructuredText**, which is almost like writing ++-in plain English, and built using `Sphinx `__. The +++in plain English, and built using `Sphinx `__. The ++ Sphinx Documentation has an excellent `introduction to reST ++ `__. Review the Sphinx docs to perform more ++ complex changes to the documentation as well. ++@@ -379,7 +379,7 @@ Some other important things to know abou ++ contributing_docstring.rst ++ ++ * The tutorials make heavy use of the `ipython directive ++- `_ sphinx extension. +++ `_ sphinx extension. ++ This directive lets you put code in the documentation which will be run ++ during the doc build. For example:: ++ ++@@ -425,7 +425,7 @@ Some other important things to know abou ++ The ``.rst`` files are used to automatically generate Markdown and HTML versions ++ of the docs. For this reason, please do not edit ``CONTRIBUTING.md`` directly, ++ but instead make any changes to ``doc/source/development/contributing.rst``. Then, to ++- generate ``CONTRIBUTING.md``, use `pandoc `_ +++ generate ``CONTRIBUTING.md``, use `pandoc `_ ++ with the following command:: ++ ++ pandoc doc/source/development/contributing.rst -t markdown_github > CONTRIBUTING.md ++@@ -609,8 +609,8 @@ You can also run this command on an enti ++ cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive modified-c-directory ++ ++ To make your commits compliant with this standard, you can install the ++-`ClangFormat `_ tool, which can be ++-downloaded `here `__. To configure, in your home directory, +++`ClangFormat `_ tool, which can be +++downloaded `here `__. To configure, in your home directory, ++ run the following command:: ++ ++ clang-format style=google -dump-config > .clang-format ++@@ -638,7 +638,7 @@ fixes manually. ++ Python (PEP8 / black) ++ ~~~~~~~~~~~~~~~~~~~~~ ++ ++-*pandas* follows the `PEP8 `_ standard +++*pandas* follows the `PEP8 `_ standard ++ and uses `Black `_ and ++ `Flake8 `_ to ensure a consistent code ++ format throughout the project. ++@@ -939,9 +939,9 @@ Adding tests is one of the most common r ++ it is worth getting in the habit of writing tests ahead of time so this is never an issue. ++ ++ Like many packages, *pandas* uses `pytest ++-`_ and the convenient +++`_ and the convenient ++ extensions in `numpy.testing ++-`_. +++`_. ++ ++ .. note:: ++ ++@@ -992,7 +992,7 @@ Transitioning to ``pytest`` ++ class TestReallyCoolFeature: ++ pass ++ ++-Going forward, we are moving to a more *functional* style using the `pytest `__ framework, which offers a richer testing +++Going forward, we are moving to a more *functional* style using the `pytest `__ framework, which offers a richer testing ++ framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this: ++ ++ .. code-block:: python ++@@ -1225,7 +1225,7 @@ On Windows, one can type:: ++ This can significantly reduce the time it takes to locally run tests before ++ submitting a pull request. ++ ++-For more, see the `pytest `_ documentation. +++For more, see the `pytest `_ documentation. 
++ ++ Furthermore one can run ++ ++--- a/doc/source/development/contributing_docstring.rst +++++ b/doc/source/development/contributing_docstring.rst ++@@ -77,8 +77,8 @@ language that allows encoding styles in ++ about reStructuredText can be found in: ++ ++ * `Sphinx reStructuredText primer `_ ++-* `Quick reStructuredText reference `_ ++-* `Full reStructuredText specification `_ +++* `Quick reStructuredText reference `_ +++* `Full reStructuredText specification `_ ++ ++ Pandas has some helpers for sharing docstrings between related classes, see ++ :ref:`docstring.sharing`. ++--- a/doc/source/development/extending.rst +++++ b/doc/source/development/extending.rst ++@@ -306,7 +306,7 @@ Subclassing pandas data structures ++ ++ 1. Extensible method chains with :ref:`pipe ` ++ ++- 2. Use *composition*. See `here `_. +++ 2. Use *composition*. See `here `_. ++ ++ 3. Extending by :ref:`registering an accessor ` ++ ++--- a/doc/source/development/maintaining.rst +++++ b/doc/source/development/maintaining.rst ++@@ -36,7 +36,7 @@ of what it means to be a maintainer. ++ * Provide experience / wisdom on API design questions to ensure consistency and maintainability ++ * Project organization (run / attend developer meetings, represent pandas) ++ ++-http://matthewrocklin.com/blog/2019/05/18/maintainer may be interesting background +++https://matthewrocklin.com/blog/2019/05/18/maintainer may be interesting background ++ reading. ++ ++ .. _maintaining.triage: ++@@ -78,7 +78,7 @@ Here's a typical workflow for triaging a ++ 4. **Is the issue minimal and reproducible**? ++ ++ For bug reports, we ask that the reporter provide a minimal reproducible ++- example. See http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports +++ example. See https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports ++ for a good explanation. If the example is not reproducible, or if it's ++ *clearly* not minimal, feel free to ask the reporter if they can provide ++ and example or simplify the provided one. Do acknowledge that writing ++--- a/doc/source/ecosystem.rst +++++ b/doc/source/ecosystem.rst ++@@ -293,8 +293,8 @@ dimensional arrays, rather than the tabu ++ Out-of-core ++ ------------- ++ ++-`Blaze `__ ++-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++`Blaze `__ +++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++ Blaze provides a standard API for doing computations with various ++ in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, PyTables, ++--- a/doc/source/getting_started/comparison/comparison_with_r.rst +++++ b/doc/source/getting_started/comparison/comparison_with_r.rst ++@@ -6,9 +6,9 @@ Comparison with R / R libraries ++ ******************************* ++ ++ Since ``pandas`` aims to provide a lot of the data manipulation and analysis ++-functionality that people use `R `__ for, this page +++functionality that people use `R `__ for, this page ++ was started to provide a more detailed look at the `R language ++-`__ and its many third +++`__ and its many third ++ party libraries as they relate to ``pandas``. In comparisons with R and CRAN ++ libraries, we care about the following things: ++ ++@@ -518,37 +518,37 @@ For more details and examples see :ref:` ++ ++ ++ .. |c| replace:: ``c`` ++-.. _c: http://stat.ethz.ch/R-manual/R-patched/library/base/html/c.html +++.. _c: https://stat.ethz.ch/R-manual/R-patched/library/base/html/c.html ++ ++ .. |aggregate| replace:: ``aggregate`` ++-.. _aggregate: http://finzi.psych.upenn.edu/R/library/stats/html/aggregate.html +++.. 
_aggregate: https://stat.ethz.ch/R-manual/R-patched/library/stats/html/aggregate.html ++ ++ .. |match| replace:: ``match`` / ``%in%`` ++-.. _match: http://finzi.psych.upenn.edu/R/library/base/html/match.html +++.. _match: https://stat.ethz.ch/R-manual/R-patched/library/base/html/match.html ++ ++ .. |tapply| replace:: ``tapply`` ++-.. _tapply: http://finzi.psych.upenn.edu/R/library/base/html/tapply.html +++.. _tapply: https://stat.ethz.ch/R-manual/R-patched/library/base/html/tapply.html ++ ++ .. |with| replace:: ``with`` ++-.. _with: http://finzi.psych.upenn.edu/R/library/base/html/with.html +++.. _with: https://stat.ethz.ch/R-manual/R-patched/library/base/html/with.html ++ ++ .. |subset| replace:: ``subset`` ++-.. _subset: http://finzi.psych.upenn.edu/R/library/base/html/subset.html +++.. _subset: https://stat.ethz.ch/R-manual/R-patched/library/base/html/subset.html ++ ++ .. |ddply| replace:: ``ddply`` ++-.. _ddply: http://www.inside-r.org/packages/cran/plyr/docs/ddply +++.. _ddply: https://cran.r-project.org/web/packages/plyr/plyr.pdf#Rfn.ddply.1 ++ ++ .. |meltarray| replace:: ``melt.array`` ++-.. _meltarray: http://www.inside-r.org/packages/cran/reshape2/docs/melt.array +++.. _meltarray: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.array.1 ++ ++ .. |meltlist| replace:: ``melt.list`` ++-.. meltlist: http://www.inside-r.org/packages/cran/reshape2/docs/melt.list +++.. meltlist: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.list.1 ++ ++ .. |meltdf| replace:: ``melt.data.frame`` ++-.. meltdf: http://www.inside-r.org/packages/cran/reshape2/docs/melt.data.frame +++.. meltdf: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.data.frame.1 ++ ++ .. |cast| replace:: ``cast`` ++-.. cast: http://www.inside-r.org/packages/cran/reshape2/docs/cast +++.. cast: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.cast.1 ++ ++ .. |factor| replace:: ``factor`` ++ .. _factor: https://stat.ethz.ch/R-manual/R-devel/library/base/html/factor.html ++--- a/doc/source/getting_started/comparison/comparison_with_stata.rst +++++ b/doc/source/getting_started/comparison/comparison_with_stata.rst ++@@ -673,6 +673,6 @@ Disk vs memory ++ Pandas and Stata both operate exclusively in memory. This means that the size of ++ data able to be loaded in pandas is limited by your machine's memory. ++ If out of core processing is needed, one possibility is the ++-`dask.dataframe `_ +++`dask.dataframe `_ ++ library, which provides a subset of pandas functionality for an ++ on-disk ``DataFrame``. ++--- a/doc/source/getting_started/dsintro.rst +++++ b/doc/source/getting_started/dsintro.rst ++@@ -610,7 +610,7 @@ union of the column and row labels. ++ ++ When doing an operation between DataFrame and Series, the default behavior is ++ to align the Series **index** on the DataFrame **columns**, thus `broadcasting ++-`__ +++`__ ++ row-wise. For example: ++ ++ .. ipython:: python ++--- a/doc/source/getting_started/install.rst +++++ b/doc/source/getting_started/install.rst ++@@ -7,13 +7,13 @@ Installation ++ ============ ++ ++ The easiest way to install pandas is to install it ++-as part of the `Anaconda `__ distribution, a +++as part of the `Anaconda `__ distribution, a ++ cross platform distribution for data analysis and scientific computing. ++ This is the recommended installation method for most users. 
++ ++ Instructions for installing from source, ++ `PyPI `__, `ActivePython `__, various Linux distributions, or a ++-`development version `__ are also provided. +++`development version `__ are also provided. ++ ++ Python version support ++ ---------------------- ++@@ -28,28 +28,28 @@ Installing pandas ++ Installing with Anaconda ++ ~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++-Installing pandas and the rest of the `NumPy `__ and ++-`SciPy `__ stack can be a little +++Installing pandas and the rest of the `NumPy `__ and +++`SciPy `__ stack can be a little ++ difficult for inexperienced users. ++ ++ The simplest way to install not only pandas, but Python and the most popular ++-packages that make up the `SciPy `__ stack ++-(`IPython `__, `NumPy `__, ++-`Matplotlib `__, ...) is with ++-`Anaconda `__, a cross-platform +++packages that make up the `SciPy `__ stack +++(`IPython `__, `NumPy `__, +++`Matplotlib `__, ...) is with +++`Anaconda `__, a cross-platform ++ (Linux, Mac OS X, Windows) Python distribution for data analytics and ++ scientific computing. ++ ++ After running the installer, the user will have access to pandas and the ++-rest of the `SciPy `__ stack without needing to install +++rest of the `SciPy `__ stack without needing to install ++ anything else, and without needing to wait for any software to be compiled. ++ ++-Installation instructions for `Anaconda `__ ++-`can be found here `__. +++Installation instructions for `Anaconda `__ +++`can be found here `__. ++ ++ A full list of the packages available as part of the ++-`Anaconda `__ distribution ++-`can be found here `__. +++`Anaconda `__ distribution +++`can be found here `__. ++ ++ Another advantage to installing Anaconda is that you don't need ++ admin rights to install it. Anaconda can install in the user's home directory, ++@@ -62,28 +62,28 @@ Installing with Miniconda ++ ~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++ The previous section outlined how to get pandas installed as part of the ++-`Anaconda `__ distribution. +++`Anaconda `__ distribution. ++ However this approach means you will install well over one hundred packages ++ and involves downloading the installer which is a few hundred megabytes in size. ++ ++ If you want to have more control on which packages, or have a limited internet ++ bandwidth, then installing pandas with ++-`Miniconda `__ may be a better solution. +++`Miniconda `__ may be a better solution. ++ ++-`Conda `__ is the package manager that the ++-`Anaconda `__ distribution is built upon. +++`Conda `__ is the package manager that the +++`Anaconda `__ distribution is built upon. ++ It is a package manager that is both cross-platform and language agnostic ++ (it can play a similar role to a pip and virtualenv combination). ++ ++-`Miniconda `__ allows you to create a +++`Miniconda `__ allows you to create a ++ minimal self contained Python installation, and then use the ++-`Conda `__ command to install additional packages. +++`Conda `__ command to install additional packages. ++ ++-First you will need `Conda `__ to be installed and +++First you will need `Conda `__ to be installed and ++ downloading and running the `Miniconda ++-`__ +++`__ ++ will do this for you. The installer ++-`can be found here `__ +++`can be found here `__ ++ ++ The next step is to create a new conda environment. A conda environment is like a ++ virtualenv that allows you to specify a specific version of Python and set of libraries. 
++@@ -113,7 +113,7 @@ To install other packages, IPython for e ++ ++ conda install ipython ++ ++-To install the full `Anaconda `__ +++To install the full `Anaconda `__ ++ distribution:: ++ ++ conda install anaconda ++@@ -153,10 +153,10 @@ To install pandas for Python 2, you may ++ :widths: 10, 10, 20, 50 ++ ++ ++- Debian, stable, `official Debian repository `__ , ``sudo apt-get install python3-pandas`` +++ Debian, stable, `official Debian repository `__ , ``sudo apt-get install python3-pandas`` ++ Debian & Ubuntu, unstable (latest packages), `NeuroDebian `__ , ``sudo apt-get install python3-pandas`` ++- Ubuntu, stable, `official Ubuntu repository `__ , ``sudo apt-get install python3-pandas`` ++- OpenSuse, stable, `OpenSuse Repository `__ , ``zypper in python3-pandas`` +++ Ubuntu, stable, `official Ubuntu repository `__ , ``sudo apt-get install python3-pandas`` +++ OpenSuse, stable, `OpenSuse Repository `__ , ``zypper in python3-pandas`` ++ Fedora, stable, `official Fedora repository `__ , ``dnf install python3-pandas`` ++ Centos/RHEL, stable, `EPEL repository `__ , ``yum install python3-pandas`` ++ ++@@ -177,7 +177,7 @@ pandas is equipped with an exhaustive se ++ the code base as of this writing. To run it on your machine to verify that ++ everything is working (and that you have all of the dependencies, soft and hard, ++ installed), make sure you have `pytest ++-`__ >= 5.0.1 and `Hypothesis +++`__ >= 5.0.1 and `Hypothesis ++ `__ >= 3.58, then run: ++ ++ :: ++@@ -204,9 +204,9 @@ Dependencies ++ Package Minimum supported version ++ ================================================================ ========================== ++ `setuptools `__ 24.2.0 ++-`NumPy `__ 1.13.3 +++`NumPy `__ 1.13.3 ++ `python-dateutil `__ 2.6.1 ++-`pytz `__ 2017.2 +++`pytz `__ 2017.2 ++ ================================================================ ========================== ++ ++ .. _install.recommended_dependencies: ++@@ -303,6 +303,6 @@ top-level :func:`~pandas.read_html` func ++ usage of the above three libraries. ++ ++ .. _html5lib: https://github.com/html5lib/html5lib-python ++-.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup ++-.. _lxml: http://lxml.de +++.. _BeautifulSoup4: https://www.crummy.com/software/BeautifulSoup +++.. _lxml: https://lxml.de ++ .. _tabulate: https://github.com/astanin/python-tabulate ++--- a/doc/source/getting_started/tutorials.rst +++++ b/doc/source/getting_started/tutorials.rst ++@@ -23,12 +23,12 @@ Community guides ++ pandas Cookbook by Julia Evans ++ ------------------------------ ++ ++-The goal of this 2015 cookbook (by `Julia Evans `_) is to +++The goal of this 2015 cookbook (by `Julia Evans `_) is to ++ give you some concrete examples for getting started with pandas. These ++ are examples with real-world data, and all the bugs and weirdness that ++ entails. ++ For the table of contents, see the `pandas-cookbook GitHub ++-repository `_. +++repository `_. ++ ++ Learn Pandas by Hernan Rojas ++ ---------------------------- ++@@ -38,10 +38,10 @@ A set of lesson for new pandas users: ht ++ Practical data analysis with Python ++ ----------------------------------- ++ ++-This `guide `_ is an introduction to the data analysis process using the Python data ecosystem and an interesting open dataset. ++-There are four sections covering selected topics as `munging data `__, ++-`aggregating data `_, `visualizing data `_ ++-and `time series `_. +++This `guide `_ is an introduction to the data analysis process using the Python data ecosystem and an interesting open dataset. 
+++There are four sections covering selected topics as `munging data `__, +++`aggregating data `_, `visualizing data `_ +++and `time series `_. ++ ++ .. _tutorial-exercises-new-users: ++ ++@@ -61,13 +61,13 @@ Tutorial series written in 2016 by ++ The source may be found in the GitHub repository ++ `TomAugspurger/effective-pandas `_. ++ ++-* `Modern Pandas `_ ++-* `Method Chaining `_ ++-* `Indexes `_ ++-* `Performance `_ ++-* `Tidy Data `_ ++-* `Visualization `_ ++-* `Timeseries `_ +++* `Modern Pandas `_ +++* `Method Chaining `_ +++* `Indexes `_ +++* `Performance `_ +++* `Tidy Data `_ +++* `Visualization `_ +++* `Timeseries `_ ++ ++ Excel charts with pandas, vincent and xlsxwriter ++ ------------------------------------------------ ++@@ -89,21 +89,21 @@ Video tutorials ++ * `Data analysis in Python with pandas `_ ++ (2016-2018) ++ `GitHub repo `__ and ++- `Jupyter Notebook `__ +++ `Jupyter Notebook `__ ++ * `Best practices with pandas `_ ++ (2018) ++ `GitHub repo `__ and ++- `Jupyter Notebook `__ +++ `Jupyter Notebook `__ ++ ++ ++ Various tutorials ++ ----------------- ++ ++-* `Wes McKinney's (pandas BDFL) blog `_ +++* `Wes McKinney's (pandas BDFL) blog `_ ++ * `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson `_ ++-* `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 `_ ++-* `Financial analysis in Python, by Thomas Wiecki `_ +++* `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 `_ +++* `Financial analysis in Python, by Thomas Wiecki `_ ++ * `Intro to pandas data structures, by Greg Reda `_ ++-* `Pandas and Python: Top 10, by Manish Amde `_ ++-* `Pandas DataFrames Tutorial, by Karlijn Willems `_ +++* `Pandas and Python: Top 10, by Manish Amde `_ +++* `Pandas DataFrames Tutorial, by Karlijn Willems `_ ++ * `A concise tutorial with real life examples `_ ++--- a/doc/source/user_guide/computation.rst +++++ b/doc/source/user_guide/computation.rst ++@@ -58,7 +58,7 @@ series in the DataFrame, also excluding ++ is not guaranteed to be positive semi-definite. This could lead to ++ estimated correlations having absolute values which are greater than one, ++ and/or a non-invertible covariance matrix. See `Estimation of covariance ++- matrices `_ +++ matrices `_ ++ for more details. ++ ++ .. ipython:: python ++@@ -346,7 +346,7 @@ if installed as an optional dependency. ++ ``engine='numba'`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``). ++ Numba will be applied in potentially two routines: ++ ++-1. If ``func`` is a standard Python function, the engine will `JIT `__ +++1. If ``func`` is a standard Python function, the engine will `JIT `__ ++ the passed function. ``func`` can also be a JITed function in which case the engine will not JIT the function again. ++ 2. The engine will JIT the for loop where the apply function is applied to each window. ++ ++@@ -1063,5 +1063,5 @@ are scaled by debiasing factors ++ ++ (For :math:`w_i = 1`, this reduces to the usual :math:`N / (N - 1)` factor, ++ with :math:`N = t + 1`.) ++-See `Weighted Sample Variance `__ +++See `Weighted Sample Variance `__ ++ on Wikipedia for further details. ++--- a/doc/source/user_guide/enhancingperf.rst +++++ b/doc/source/user_guide/enhancingperf.rst ++@@ -20,7 +20,7 @@ Cython (writing C extensions for pandas) ++ ++ For many use cases writing pandas in pure Python and NumPy is sufficient. 
In some ++ computationally heavy applications however, it can be possible to achieve sizable ++-speed-ups by offloading work to `cython `__. +++speed-ups by offloading work to `cython `__. ++ ++ This tutorial assumes you have refactored as much as possible in Python, for example ++ by trying to remove for-loops and making use of NumPy vectorization. It's always worth ++@@ -69,7 +69,7 @@ We achieve our result by using ``apply`` ++ ++ But clearly this isn't fast enough for us. Let's take a look and see where the ++ time is spent during this operation (limited to the most time consuming ++-four calls) using the `prun ipython magic function `__: +++four calls) using the `prun ipython magic function `__: ++ ++ .. ipython:: python ++ ++@@ -298,7 +298,7 @@ advanced Cython techniques: ++ Even faster, with the caveat that a bug in our Cython code (an off-by-one error, ++ for example) might cause a segfault because memory access isn't checked. ++ For more about ``boundscheck`` and ``wraparound``, see the Cython docs on ++-`compiler directives `__. +++`compiler directives `__. ++ ++ .. _enhancingperf.numba: ++ ++@@ -423,9 +423,9 @@ prefer that Numba throw an error if it c ++ speeds up your code, pass Numba the argument ++ ``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on ++ troubleshooting Numba modes, see the `Numba troubleshooting page ++-`__. +++`__. ++ ++-Read more in the `Numba docs `__. +++Read more in the `Numba docs `__. ++ ++ .. _enhancingperf.eval: ++ ++--- a/doc/source/user_guide/io.rst +++++ b/doc/source/user_guide/io.rst ++@@ -4260,11 +4260,11 @@ control compression: ``complevel`` and ` ++ - `zlib `_: The default compression library. A classic in terms of compression, achieves good compression rates but is somewhat slow. ++ - `lzo `_: Fast compression and decompression. ++ - `bzip2 `_: Good compression rates. ++- - `blosc `_: Fast compression and decompression. +++ - `blosc `_: Fast compression and decompression. ++ ++ Support for alternative blosc compressors: ++ ++- - `blosc:blosclz `_ This is the +++ - `blosc:blosclz `_ This is the ++ default compressor for ``blosc`` ++ - `blosc:lz4 ++ `_: ++@@ -5018,7 +5018,7 @@ Possible values are: ++ like *Presto* and *Redshift*, but has worse performance for ++ traditional SQL backend if the table contains many columns. ++ For more information check the SQLAlchemy `documention ++- `__. +++ `__. ++ - callable with signature ``(pd_table, conn, keys, data_iter)``: ++ This can be used to implement a more performant insertion method based on ++ specific backend dialect features. ++--- a/doc/source/user_guide/missing_data.rst +++++ b/doc/source/user_guide/missing_data.rst ++@@ -467,9 +467,9 @@ at the new values. ++ interp_s = ser.reindex(new_index).interpolate(method='pchip') ++ interp_s[49:51] ++ ++-.. _scipy: http://www.scipy.org ++-.. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation ++-.. _guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html +++.. _scipy: https://www.scipy.org +++.. _documentation: https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation +++.. _guide: https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html ++ ++ .. 
_missing_data.interp_limits: ++ ++--- a/doc/source/user_guide/style.ipynb +++++ b/doc/source/user_guide/style.ipynb ++@@ -6,7 +6,7 @@ ++ "source": [ ++ "# Styling\n", ++ "\n", ++- "This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](http://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb).\n", +++ "This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb).\n", ++ "\n", ++ "You can apply **conditional formatting**, the visual styling of a DataFrame\n", ++ "depending on the data within, by using the ``DataFrame.style`` property.\n", ++@@ -462,7 +462,7 @@ ++ "cell_type": "markdown", ++ "metadata": {}, ++ "source": [ ++- "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](http://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." +++ "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." ++ ] ++ }, ++ { ++--- a/doc/source/user_guide/visualization.rst +++++ b/doc/source/user_guide/visualization.rst ++@@ -264,7 +264,7 @@ horizontal and cumulative histograms can ++ plt.close('all') ++ ++ See the :meth:`hist ` method and the ++-`matplotlib hist documentation `__ for more. +++`matplotlib hist documentation `__ for more. ++ ++ ++ The existing interface ``DataFrame.hist`` to plot histogram still can be used. ++@@ -370,7 +370,7 @@ For example, horizontal and custom-posit ++ ++ ++ See the :meth:`boxplot ` method and the ++-`matplotlib boxplot documentation `__ for more. +++`matplotlib boxplot documentation `__ for more. ++ ++ ++ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used. ++@@ -591,7 +591,7 @@ bubble chart using a column of the ``Dat ++ plt.close('all') ++ ++ See the :meth:`scatter ` method and the ++-`matplotlib scatter documentation `__ for more. +++`matplotlib scatter documentation `__ for more. ++ ++ .. _visualization.hexbin: ++ ++@@ -651,7 +651,7 @@ given by column ``z``. The bins are aggr ++ plt.close('all') ++ ++ See the :meth:`hexbin ` method and the ++-`matplotlib hexbin documentation `__ for more. +++`matplotlib hexbin documentation `__ for more. ++ ++ .. _visualization.pie: ++ ++@@ -749,7 +749,7 @@ If you pass values whose sum total is le ++ @savefig series_pie_plot_semi.png ++ series.plot.pie(figsize=(6, 6)) ++ ++-See the `matplotlib pie documentation `__ for more. +++See the `matplotlib pie documentation `__ for more. ++ ++ .. ipython:: python ++ :suppress: ++@@ -1268,7 +1268,7 @@ tick locator methods, it is useful to ca ++ date tick adjustment from matplotlib for figures whose ticklabels overlap. ++ ++ See the :meth:`autofmt_xdate ` method and the ++-`matplotlib documentation `__ for more. +++`matplotlib documentation `__ for more. ++ ++ Subplots ++ ~~~~~~~~ ++@@ -1477,7 +1477,7 @@ as seen in the example below. ++ There also exists a helper function ``pandas.plotting.table``, which creates a ++ table from :class:`DataFrame` or :class:`Series`, and adds it to an ++ ``matplotlib.Axes`` instance. This function can accept keywords which the ++-matplotlib `table `__ has. +++matplotlib `table `__ has. ++ ++ .. ipython:: python ++ ++@@ -1495,7 +1495,7 @@ matplotlib `table `__ for more. 
+++**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documentation `__ for more. ++ ++ .. _visualization.colormaps: ++ ++@@ -1505,7 +1505,7 @@ Colormaps ++ A potential issue when plotting a large number of columns is that it can be ++ difficult to distinguish some series due to repetition in the default colors. To ++ remedy this, ``DataFrame`` plotting supports the use of the ``colormap`` argument, ++-which accepts either a Matplotlib `colormap `__ +++which accepts either a Matplotlib `colormap `__ ++ or a string that is a name of a colormap registered with Matplotlib. A ++ visualization of the default matplotlib colormaps is available `here ++ `__. ++--- a/doc/source/whatsnew/index.rst +++++ b/doc/source/whatsnew/index.rst ++@@ -7,7 +7,7 @@ Release Notes ++ ************* ++ ++ This is the list of changes to pandas between each release. For full details, ++-see the commit logs at http://github.com/pandas-dev/pandas. For install and +++see the commit logs at https://github.com/pandas-dev/pandas. For install and ++ upgrade instructions, see :ref:`install`. ++ ++ Version 1.0 ++--- a/pandas/_libs/intervaltree.pxi.in +++++ b/pandas/_libs/intervaltree.pxi.in ++@@ -26,7 +26,7 @@ cdef class IntervalTree(IntervalMixin): ++ """A centered interval tree ++ ++ Based off the algorithm described on Wikipedia: ++- http://en.wikipedia.org/wiki/Interval_tree +++ https://en.wikipedia.org/wiki/Interval_tree ++ ++ we are emulating the IndexEngine interface ++ """ ++--- a/pandas/_libs/src/klib/khash.h +++++ b/pandas/_libs/src/klib/khash.h ++@@ -53,7 +53,7 @@ int main() { ++ speed for simple keys. Thank Zilong Tan for the suggestion. Reference: ++ ++ - https://github.com/stefanocasazza/ULib ++- - http://nothings.org/computer/judy/ +++ - https://nothings.org/computer/judy/ ++ ++ * Allow to optionally use linear probing which usually has better ++ performance for random input. Double hashing is still the default as it ++--- a/pandas/_libs/src/skiplist.h +++++ b/pandas/_libs/src/skiplist.h ++@@ -10,7 +10,7 @@ Flexibly-sized, index-able skiplist data ++ list of values ++ ++ Port of Wes McKinney's Cython version of Raymond Hettinger's original pure ++-Python recipe (http://rhettinger.wordpress.com/2010/02/06/lost-knowledge/) +++Python recipe (https://rhettinger.wordpress.com/2010/02/06/lost-knowledge/) ++ */ ++ ++ #ifndef PANDAS__LIBS_SRC_SKIPLIST_H_ ++--- a/pandas/_libs/src/ujson/lib/ultrajson.h +++++ b/pandas/_libs/src/ujson/lib/ultrajson.h ++@@ -30,7 +30,7 @@ https://github.com/client9/stringencoder ++ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. ++ */ ++--- a/pandas/_libs/src/ujson/lib/ultrajsondec.c +++++ b/pandas/_libs/src/ujson/lib/ultrajsondec.c ++@@ -33,7 +33,7 @@ Copyright (c) 2007 Nick Galbreath -- ni ++ reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. 
++ */ ++--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c ++@@ -33,7 +33,7 @@ Copyright (c) 2007 Nick Galbreath -- ni ++ reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. ++ */ ++--- a/pandas/_libs/src/ujson/python/JSONtoObj.c +++++ b/pandas/_libs/src/ujson/python/JSONtoObj.c ++@@ -30,7 +30,7 @@ https://github.com/client9/stringencoder ++ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. ++ */ ++--- a/pandas/_libs/src/ujson/python/objToJSON.c +++++ b/pandas/_libs/src/ujson/python/objToJSON.c ++@@ -31,7 +31,7 @@ Copyright (c) 2007 Nick Galbreath -- ni ++ reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. ++ */ ++--- a/pandas/_libs/src/ujson/python/ujson.c +++++ b/pandas/_libs/src/ujson/python/ujson.c ++@@ -30,7 +30,7 @@ https://github.com/client9/stringencoder ++ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. ++ */ ++--- a/pandas/_libs/src/ujson/python/version.h +++++ b/pandas/_libs/src/ujson/python/version.h ++@@ -30,7 +30,7 @@ https://github.com/client9/stringencoder ++ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. ++ ++ Numeric decoder derived from from TCL library ++-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms +++https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms ++ * Copyright (c) 1988-1993 The Regents of the University of California. ++ * Copyright (c) 1994 Sun Microsystems, Inc. 
++ */ ++--- a/pandas/_libs/tslibs/c_timestamp.pyx +++++ b/pandas/_libs/tslibs/c_timestamp.pyx ++@@ -124,7 +124,7 @@ cdef class _Timestamp(datetime): ++ ++ def __reduce_ex__(self, protocol): ++ # python 3.6 compat ++- # http://bugs.python.org/issue28730 +++ # https://bugs.python.org/issue28730 ++ # now __reduce_ex__ is defined and higher priority than __reduce__ ++ return self.__reduce__() ++ ++--- a/pandas/_libs/tslibs/nattype.pyx +++++ b/pandas/_libs/tslibs/nattype.pyx ++@@ -328,7 +328,7 @@ class NaTType(_NaT): ++ ++ def __reduce_ex__(self, protocol): ++ # python 3.6 compat ++- # http://bugs.python.org/issue28730 +++ # https://bugs.python.org/issue28730 ++ # now __reduce_ex__ is defined and higher priority than __reduce__ ++ return self.__reduce__() ++ ++--- a/pandas/core/accessor.py +++++ b/pandas/core/accessor.py ++@@ -186,7 +186,7 @@ class CachedAccessor: ++ return self._accessor ++ accessor_obj = self._accessor(obj) ++ # Replace the property with the accessor object. Inspired by: ++- # http://www.pydanny.com/cached-property.html +++ # https://www.pydanny.com/cached-property.html ++ # We need to use object.__setattr__ because we overwrite __setattr__ on ++ # NDFrame ++ object.__setattr__(obj, self._name, accessor_obj) ++--- a/pandas/core/arrays/datetimes.py +++++ b/pandas/core/arrays/datetimes.py ++@@ -1642,7 +1642,7 @@ default 'raise' ++ """ ++ Convert Datetime Array to float64 ndarray of Julian Dates. ++ 0 Julian date is noon January 1, 4713 BC. ++- http://en.wikipedia.org/wiki/Julian_day +++ https://en.wikipedia.org/wiki/Julian_day ++ """ ++ ++ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm ++--- a/pandas/core/frame.py +++++ b/pandas/core/frame.py ++@@ -1488,9 +1488,9 @@ class DataFrame(NDFrame): ++ when getting user credentials. ++ ++ .. _local webserver flow: ++- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server +++ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server ++ .. _console flow: ++- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console +++ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console ++ ++ *New in version 0.2.0 of pandas-gbq*. ++ table_schema : list of dicts, optional ++@@ -3374,7 +3374,7 @@ class DataFrame(NDFrame): ++ * To select strings you must use the ``object`` dtype, but note that ++ this will return *all* object dtype columns ++ * See the `numpy dtype hierarchy ++- `__ +++ `__ ++ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ++ ``'datetime64'`` ++ * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ++@@ -7552,7 +7552,7 @@ Wild 185.0 ++ semi-definite. This could lead to estimate correlations having ++ absolute values which are greater than one, and/or a non-invertible ++ covariance matrix. See `Estimation of covariance matrices ++- `__ for more details. ++ ++ Examples ++--- a/pandas/core/generic.py +++++ b/pandas/core/generic.py ++@@ -2592,7 +2592,7 @@ class NDFrame(PandasObject, SelectionMix ++ ++ References ++ ---------- ++- .. [1] http://docs.sqlalchemy.org +++ .. [1] https://docs.sqlalchemy.org ++ .. 
[2] https://www.python.org/dev/peps/pep-0249/ ++ ++ Examples ++@@ -2801,7 +2801,7 @@ class NDFrame(PandasObject, SelectionMix ++ ++ Notes ++ ----- ++- See the `xarray docs `__ +++ See the `xarray docs `__ ++ ++ Examples ++ -------- ++@@ -6830,9 +6830,9 @@ class NDFrame(PandasObject, SelectionMix ++ similar names. These use the actual numerical values of the index. ++ For more information on their behavior, see the ++ `SciPy documentation ++- `__ +++ `__ ++ and `SciPy tutorial ++- `__. +++ `__. ++ ++ Examples ++ -------- ++--- a/pandas/io/formats/format.py +++++ b/pandas/io/formats/format.py ++@@ -410,7 +410,7 @@ class EastAsianTextAdjustment(TextAdjust ++ self.ambiguous_width = 1 ++ ++ # Definition of East Asian Width ++- # http://unicode.org/reports/tr11/ +++ # https://unicode.org/reports/tr11/ ++ # Ambiguous width can be changed by option ++ self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1} ++ ++--- a/pandas/io/gbq.py +++++ b/pandas/io/gbq.py ++@@ -64,9 +64,9 @@ def read_gbq( ++ when getting user credentials. ++ ++ .. _local webserver flow: ++- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server +++ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server ++ .. _console flow: ++- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console +++ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console ++ ++ *New in version 0.2.0 of pandas-gbq*. ++ dialect : str, default 'legacy' ++--- a/pandas/io/html.py +++++ b/pandas/io/html.py ++@@ -987,7 +987,7 @@ def read_html( ++ ++ is a valid attribute dictionary because the 'id' HTML tag attribute is ++ a valid HTML attribute for *any* HTML tag as per `this document ++- `__. :: +++ `__. :: ++ ++ attrs = {'asdf': 'table'} ++ ++@@ -996,7 +996,7 @@ def read_html( ++ table attributes can be found `here ++ `__. A ++ working draft of the HTML 5 spec can be found `here ++- `__. It contains the +++ `__. It contains the ++ latest information on table attributes for the modern web. ++ ++ parse_dates : bool, optional ++--- a/pandas/io/json/_table_schema.py +++++ b/pandas/io/json/_table_schema.py ++@@ -1,7 +1,7 @@ ++ """ ++ Table Schema builders ++ ++-http://specs.frictionlessdata.io/json-table-schema/ +++https://specs.frictionlessdata.io/json-table-schema/ ++ """ ++ import warnings ++ ++--- a/pandas/io/stata.py +++++ b/pandas/io/stata.py ++@@ -7,7 +7,7 @@ project who also developed the StataWrit ++ a once again improved version. ++ ++ You can find more information on http://presbrey.mit.edu/PyDTA and ++-http://www.statsmodels.org/devel/ +++https://www.statsmodels.org/devel/ ++ """ ++ from collections import abc ++ import datetime ++@@ -207,7 +207,7 @@ stata_epoch = datetime.datetime(1960, 1, ++ ++ def _stata_elapsed_date_to_datetime_vec(dates, fmt): ++ """ ++- Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime +++ Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime ++ ++ Parameters ++ ---------- ++@@ -372,7 +372,7 @@ def _stata_elapsed_date_to_datetime_vec( ++ ++ def _datetime_to_stata_elapsed_vec(dates, fmt): ++ """ ++- Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime +++ Convert from datetime to SIF. 
https://www.stata.com/help.cgi?datetime ++ ++ Parameters ++ ---------- ++@@ -733,7 +733,7 @@ class StataMissingValue: ++ ++ Notes ++ ----- ++- More information: +++ More information: ++ ++ Integer missing values make the code '.', '.a', ..., '.z' to the ranges ++ 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... ++--- a/pandas/plotting/_misc.py +++++ b/pandas/plotting/_misc.py ++@@ -149,7 +149,7 @@ def radviz(frame, class_column, ax=None, ++ influence of all dimensions. ++ ++ More info available at the `original article ++- `_ +++ `_ ++ describing RadViz. ++ ++ Parameters ++--- a/pandas/tests/indexes/multi/test_analytics.py +++++ b/pandas/tests/indexes/multi/test_analytics.py ++@@ -326,7 +326,7 @@ def test_map_dictlike(idx, mapper): ++ ) ++ def test_numpy_ufuncs(idx, func): ++ # test ufuncs of numpy. see: ++- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html +++ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html ++ ++ if _np_version_under1p17: ++ expected_exception = AttributeError ++--- a/pandas/tests/indexes/test_numpy_compat.py +++++ b/pandas/tests/indexes/test_numpy_compat.py ++@@ -46,7 +46,7 @@ from pandas.core.indexes.datetimelike im ++ ) ++ def test_numpy_ufuncs_basic(indices, func): ++ # test ufuncs of numpy, see: ++- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html +++ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html ++ ++ idx = indices ++ if isinstance(idx, DatetimeIndexOpsMixin): ++@@ -77,7 +77,7 @@ def test_numpy_ufuncs_basic(indices, fun ++ ) ++ def test_numpy_ufuncs_other(indices, func): ++ # test ufuncs of numpy, see: ++- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html +++ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html ++ ++ idx = indices ++ if isinstance(idx, (DatetimeIndex, TimedeltaIndex)): ++--- a/pandas/tests/tseries/offsets/test_fiscal.py +++++ b/pandas/tests/tseries/offsets/test_fiscal.py ++@@ -95,7 +95,7 @@ class TestFY5253LastOfMonth(Base): ++ ++ on_offset_cases = [ ++ # From Wikipedia (see: ++- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end) +++ # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end) ++ (offset_lom_sat_aug, datetime(2006, 8, 26), True), ++ (offset_lom_sat_aug, datetime(2007, 8, 25), True), ++ (offset_lom_sat_aug, datetime(2008, 8, 30), True), ++@@ -208,7 +208,7 @@ class TestFY5253NearestEndMonth(Base): ++ ++ on_offset_cases = [ ++ # From Wikipedia (see: ++- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar +++ # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar ++ # #Saturday_nearest_the_end_of_month) ++ # 2006-09-02 2006 September 2 ++ # 2007-09-01 2007 September 1 ++--- a/pandas/tseries/offsets.py +++++ b/pandas/tseries/offsets.py ++@@ -2062,7 +2062,7 @@ class FY5253(DateOffset): ++ such as retail, manufacturing and parking industry. ++ ++ For more information see: ++- http://en.wikipedia.org/wiki/4-4-5_calendar +++ https://en.wikipedia.org/wiki/4-4-5_calendar ++ ++ The year may either: ++ ++@@ -2270,7 +2270,7 @@ class FY5253Quarter(DateOffset): ++ such as retail, manufacturing and parking industry. 
++ ++ For more information see: ++- http://en.wikipedia.org/wiki/4-4-5_calendar +++ https://en.wikipedia.org/wiki/4-4-5_calendar ++ ++ The year may either: ++ ++--- a/pandas/util/_decorators.py +++++ b/pandas/util/_decorators.py ++@@ -248,7 +248,7 @@ def rewrite_axis_style_signature( ++ ++ ++ # Substitution and Appender are derived from matplotlib.docstring (1.1.0) ++-# module http://matplotlib.org/users/license.html +++# module https://matplotlib.org/users/license.html ++ ++ ++ class Substitution: ++--- a/setup.py +++++ b/setup.py ++@@ -60,7 +60,7 @@ except ImportError: ++ ++ # The import of Extension must be after the import of Cython, otherwise ++ # we do not get the appropriately patched class. ++-# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html +++# See https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html ++ from distutils.extension import Extension # noqa: E402 isort:skip ++ from distutils.command.build import build # noqa: E402 isort:skip ++ ++--- a/versioneer.py +++++ b/versioneer.py ++@@ -1677,7 +1677,7 @@ def do_setup(): ++ except EnvironmentError: ++ pass ++ # That doesn't cover everything MANIFEST.in can do ++- # (http://docs.python.org/2/distutils/sourcedist.html#commands), so +++ # (https://docs.python.org/2/distutils/sourcedist.html#commands), so ++ # it might give some false negatives. Appending redundant 'include' ++ # lines is safe, though. ++ if "versioneer.py" not in simple_includes: ++--- a/web/pandas/about/citing.md +++++ b/web/pandas/about/citing.md ++@@ -4,7 +4,7 @@ ++ ++ If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers: ++ ++-- [Data structures for statistical computing in python](http://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf), +++- [Data structures for statistical computing in python](https://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf), ++ McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010. ++ ++ @inproceedings{mckinney2010data, ++--- a/web/pandas/about/index.md +++++ b/web/pandas/about/index.md ++@@ -2,8 +2,8 @@ ++ ++ ## History of development ++ ++-In 2008, _pandas_ development began at [AQR Capital Management](http://www.aqr.com). ++-By the end of 2009 it had been [open sourced](http://en.wikipedia.org/wiki/Open_source), +++In 2008, _pandas_ development began at [AQR Capital Management](https://www.aqr.com). +++By the end of 2009 it had been [open sourced](https://en.wikipedia.org/wiki/Open_source), ++ and is actively supported today by a community of like-minded individuals around the world who ++ contribute their valuable time and energy to help make open source _pandas_ ++ possible. Thank you to [all of our contributors](team.html). ++--- a/web/pandas/community/coc.md +++++ b/web/pandas/community/coc.md ++@@ -54,10 +54,10 @@ incident. ++ ++ This Code of Conduct is adapted from the [Contributor Covenant][homepage], ++ version 1.3.0, available at ++-[http://contributor-covenant.org/version/1/3/0/][version], +++[https://www.contributor-covenant.org/version/1/3/0/][version], ++ and the [Swift Code of Conduct][swift]. 
++ ++-[homepage]: http://contributor-covenant.org ++-[version]: http://contributor-covenant.org/version/1/3/0/ +++[homepage]: https://www.contributor-covenant.org +++[version]: https://www.contributor-covenant.org/version/1/3/0/ ++ [swift]: https://swift.org/community/#code-of-conduct ++ ++--- a/web/pandas/community/ecosystem.md +++++ b/web/pandas/community/ecosystem.md ++@@ -264,7 +264,7 @@ which pandas excels. ++ ++ ## Out-of-core ++ ++-### [Blaze](http://blaze.pydata.org/) +++### [Blaze](https://blaze.pydata.org/) ++ ++ Blaze provides a standard API for doing computations with various ++ in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, ++--- a/web/pandas/index.html +++++ b/web/pandas/index.html ++@@ -7,7 +7,7 @@ ++

++             pandas
++
++             pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,
++-            built on top of the Python programming language.
+++            built on top of the Python programming language.
++
++ Install pandas now! diff --cc debian/patches/mathjax-path.patch index 00000000,00000000..7073f317 new file mode 100644 --- /dev/null +++ b/debian/patches/mathjax-path.patch @@@ -1,0 -1,0 +1,16 @@@ ++Description: Use Debian packaged mathjax ++ ++Author: Andreas Tille ++Forwarded: not-needed ++ ++--- a/doc/source/conf.py +++++ b/doc/source/conf.py ++@@ -67,6 +67,8 @@ extensions = [ ++ "contributors", # custom pandas extension ++ ] ++ +++mathjax_path="MathJax.js" +++ ++ exclude_patterns = ["**.ipynb_checkpoints"] ++ try: ++ import nbconvert diff --cc debian/patches/matplotlib32_compat.patch index 00000000,00000000..fa6e07a9 new file mode 100644 --- /dev/null +++ b/debian/patches/matplotlib32_compat.patch @@@ -1,0 -1,0 +1,39 @@@ ++Description: Don't fail tests on harmless changes to dependencies ++ ++Don't assert that matplotlib rejects shorthand hex colors, ++as from 3.2 it accepts them: ++https://matplotlib.org/users/prev_whats_new/whats_new_3.2.0.html#digit-and-4-digit-hex-colors ++ ++Author: Rebecca N. Palmer ++Forwarded: accepted https://github.com/pandas-dev/pandas/pull/33262 ++ ++--- a/pandas/tests/plotting/test_frame.py +++++ b/pandas/tests/plotting/test_frame.py ++@@ -2036,12 +2036,6 @@ class TestDataFramePlots(TestPlotBase): ++ self._check_colors(ax.get_lines(), linecolors=custom_colors) ++ tm.close() ++ ++- with pytest.raises(ValueError): ++- # Color contains shorthand hex value results in ValueError ++- custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"] ++- # Forced show plot ++- _check_plot_works(df.plot, color=custom_colors) ++- ++ @pytest.mark.slow ++ def test_dont_modify_colors(self): ++ colors = ["r", "g", "b"] ++@@ -2093,14 +2087,6 @@ class TestDataFramePlots(TestPlotBase): ++ self._check_colors(ax.get_lines(), linecolors=[c]) ++ tm.close() ++ ++- with pytest.raises(ValueError): ++- # Color contains shorthand hex value results in ValueError ++- custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"] ++- # Forced show plot ++- # _check_plot_works adds an ax so catch warning. see GH #13188 ++- with tm.assert_produces_warning(UserWarning): ++- _check_plot_works(df.plot, color=custom_colors, subplots=True) ++- ++ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] ++ for cmap in ["jet", cm.jet]: ++ axes = df.plot(colormap=cmap, subplots=True) diff --cc debian/patches/matplotlib33_compat.patch index 00000000,00000000..3612d4b6 new file mode 100644 --- /dev/null +++ b/debian/patches/matplotlib33_compat.patch @@@ -1,0 -1,0 +1,247 @@@ ++Description: Matplotlib 3.3 compatibility fixups ++ ++Author: Tom Augspurger, Saul Shanabrook, Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/966393 ++Origin: (partly) upstream commits 41022a8 + 00ea10c ++Forwarded: no ++ ++--- a/pandas/util/_test_decorators.py +++++ b/pandas/util/_test_decorators.py ++@@ -94,7 +94,7 @@ xfail_non_writeable = pytest.mark.xfail( ++ def _skip_if_no_mpl(): ++ mod = safe_import("matplotlib") ++ if mod: ++- mod.use("Agg", warn=True) +++ mod.use("Agg") ++ else: ++ return True ++ ++--- a/pandas/plotting/_matplotlib/boxplot.py +++++ b/pandas/plotting/_matplotlib/boxplot.py ++@@ -288,6 +288,11 @@ def boxplot( ++ if fontsize is not None: ++ ax.tick_params(axis="both", labelsize=fontsize) ++ if kwds.get("vert", 1): +++ ticks = ax.get_xticks() +++ if len(ticks) != len(keys): +++ i, remainder = divmod(len(ticks), len(keys)) +++ assert remainder == 0, remainder +++ keys *= i ++ ax.set_xticklabels(keys, rotation=rot) ++ else: ++ ax.set_yticklabels(keys, rotation=rot) ++--- a/pandas/plotting/_matplotlib/compat.py +++++ b/pandas/plotting/_matplotlib/compat.py ++@@ -21,3 +21,4 @@ _mpl_ge_2_2_3 = _mpl_version("2.2.3", op ++ _mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge) ++ _mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) ++ _mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) +++_mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) ++--- a/pandas/plotting/_matplotlib/converter.py +++++ b/pandas/plotting/_matplotlib/converter.py ++@@ -15,7 +15,6 @@ from pandas._libs.tslibs import resoluti ++ from pandas._libs.tslibs.frequencies import FreqGroup, get_freq ++ ++ from pandas.core.dtypes.common import ( ++- is_datetime64_ns_dtype, ++ is_float, ++ is_float_dtype, ++ is_integer, ++@@ -246,19 +245,6 @@ def get_datevalue(date, freq): ++ raise ValueError(f"Unrecognizable date '{date}'") ++ ++ ++-def _dt_to_float_ordinal(dt): ++- """ ++- Convert :mod:`datetime` to the Gregorian date as UTC float days, ++- preserving hours, minutes, seconds and microseconds. Return value ++- is a :func:`float`. 
++- """ ++- if isinstance(dt, (np.ndarray, Index, ABCSeries)) and is_datetime64_ns_dtype(dt): ++- base = dates.epoch2num(dt.asi8 / 1.0e9) ++- else: ++- base = dates.date2num(dt) ++- return base ++- ++- ++ # Datetime Conversion ++ class DatetimeConverter(dates.DateConverter): ++ @staticmethod ++@@ -274,15 +260,11 @@ class DatetimeConverter(dates.DateConver ++ def _convert_1d(values, unit, axis): ++ def try_parse(values): ++ try: ++- return _dt_to_float_ordinal(tools.to_datetime(values)) +++ return dates.date2num(tools.to_datetime(values)) ++ except Exception: ++ return values ++ ++- if isinstance(values, (datetime, pydt.date)): ++- return _dt_to_float_ordinal(values) ++- elif isinstance(values, np.datetime64): ++- return _dt_to_float_ordinal(tslibs.Timestamp(values)) ++- elif isinstance(values, pydt.time): +++ if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): ++ return dates.date2num(values) ++ elif is_integer(values) or is_float(values): ++ return values ++@@ -303,12 +285,10 @@ class DatetimeConverter(dates.DateConver ++ ++ try: ++ values = tools.to_datetime(values) ++- if isinstance(values, Index): ++- values = _dt_to_float_ordinal(values) ++- else: ++- values = [_dt_to_float_ordinal(x) for x in values] ++ except Exception: ++- values = _dt_to_float_ordinal(values) +++ pass +++ +++ values = dates.date2num(values) ++ ++ return values ++ ++@@ -429,8 +409,8 @@ class MilliSecondLocator(dates.DateLocat ++ interval = self._get_interval() ++ freq = f"{interval}L" ++ tz = self.tz.tzname(None) ++- st = _from_ordinal(dates.date2num(dmin)) # strip tz ++- ed = _from_ordinal(dates.date2num(dmax)) +++ st = dmin.replace(tzinfo=None) +++ ed = dmin.replace(tzinfo=None) ++ all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) ++ ++ try: ++--- a/pandas/tests/plotting/test_converter.py +++++ b/pandas/tests/plotting/test_converter.py ++@@ -27,6 +27,7 @@ except ImportError: ++ pass ++ ++ pytest.importorskip("matplotlib.pyplot") +++dates = pytest.importorskip("matplotlib.dates") ++ ++ ++ def test_registry_mpl_resets(): ++@@ -146,7 +147,7 @@ class TestDateTimeConverter: ++ ++ def test_conversion(self): ++ rs = self.dtc.convert(["2012-1-1"], None, None)[0] ++- xp = datetime(2012, 1, 1).toordinal() +++ xp = dates.date2num(datetime(2012, 1, 1)) ++ assert rs == xp ++ ++ rs = self.dtc.convert("2012-1-1", None, None) ++@@ -155,9 +156,6 @@ class TestDateTimeConverter: ++ rs = self.dtc.convert(date(2012, 1, 1), None, None) ++ assert rs == xp ++ ++- rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None) ++- assert rs == xp ++- ++ rs = self.dtc.convert("2012-1-1", None, None) ++ assert rs == xp ++ ++--- a/pandas/tests/plotting/test_datetimelike.py +++++ b/pandas/tests/plotting/test_datetimelike.py ++@@ -306,7 +306,7 @@ class TestTSPlot(TestPlotBase): ++ bts = tm.makePeriodSeries() ++ _, ax = self.plt.subplots() ++ bts.plot(ax=ax) ++- assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal +++ ++ idx = ax.get_lines()[0].get_xdata() ++ assert PeriodIndex(data=idx).freqstr == "B" ++ ++@@ -1262,6 +1262,8 @@ class TestTSPlot(TestPlotBase): ++ @pytest.mark.slow ++ def test_irregular_ts_shared_ax_xlim(self): ++ # GH 2960 +++ from pandas.plotting._matplotlib.converter import DatetimeConverter +++ ++ ts = tm.makeTimeSeries()[:20] ++ ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] ++ ++@@ -1272,8 +1274,8 @@ class TestTSPlot(TestPlotBase): ++ ++ # check that axis limits are correct ++ left, right = ax.get_xlim() ++- assert left <= 
ts_irregular.index.min().toordinal() ++- assert right >= ts_irregular.index.max().toordinal() +++ assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) +++ assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) ++ ++ @pytest.mark.slow ++ def test_secondary_y_non_ts_xlim(self): ++@@ -1328,6 +1330,8 @@ class TestTSPlot(TestPlotBase): ++ @pytest.mark.slow ++ def test_secondary_y_irregular_ts_xlim(self): ++ # GH 3490 - irregular-timeseries with secondary y +++ from pandas.plotting._matplotlib.converter import DatetimeConverter +++ ++ ts = tm.makeTimeSeries()[:20] ++ ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] ++ ++@@ -1339,8 +1343,8 @@ class TestTSPlot(TestPlotBase): ++ ts_irregular[:5].plot(ax=ax) ++ ++ left, right = ax.get_xlim() ++- assert left <= ts_irregular.index.min().toordinal() ++- assert right >= ts_irregular.index.max().toordinal() +++ assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) +++ assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) ++ ++ def test_plot_outofbounds_datetime(self): ++ # 2579 - checking this does not raise ++@@ -1444,7 +1448,7 @@ class TestTSPlot(TestPlotBase): ++ s2.plot(ax=ax) ++ s1.plot(ax=ax) ++ ++- @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") +++ @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter", strict=False) ++ def test_add_matplotlib_datetime64(self): ++ # GH9053 - ensure that a plot with PeriodConverter still understands ++ # datetime64 data. This still fails because matplotlib overrides the ++--- a/pandas/tests/plotting/test_frame.py +++++ b/pandas/tests/plotting/test_frame.py ++@@ -1531,6 +1531,7 @@ class TestDataFramePlots(TestPlotBase): ++ ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1) ++ ) ++ assert len(ax.lines) == self.bp_n_objects * len(numeric_cols) +++ tm.close() ++ ++ axes = series.plot.box(rot=40) ++ self._check_ticks_props(axes, xrot=40, yrot=0) ++--- a/pandas/tests/plotting/test_series.py +++++ b/pandas/tests/plotting/test_series.py ++@@ -276,12 +276,14 @@ class TestSeriesPlots(TestPlotBase): ++ self._check_ticks_props(axes, xrot=30) ++ ++ def test_irregular_datetime(self): +++ from pandas.plotting._matplotlib.converter import DatetimeConverter +++ ++ rng = date_range("1/1/2000", "3/1/2000") ++ rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] ++ ser = Series(randn(len(rng)), rng) ++ _, ax = self.plt.subplots() ++ ax = ser.plot(ax=ax) ++- xp = datetime(1999, 1, 1).toordinal() +++ xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax) ++ ax.set_xlim("1/1/1999", "1/1/2001") ++ assert xp == ax.get_xlim()[0] ++ ++@@ -686,11 +688,13 @@ class TestSeriesPlots(TestPlotBase): ++ kinds = ( ++ plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds ++ ) ++- _, ax = self.plt.subplots() ++ for kind in kinds: ++- +++ _, ax = self.plt.subplots() ++ s.plot(kind=kind, ax=ax) +++ self.plt.close() +++ _, ax = self.plt.subplots() ++ getattr(s.plot, kind)() +++ self.plt.close() ++ ++ @pytest.mark.slow ++ def test_invalid_plot_data(self): diff --cc debian/patches/numba_fail_32bit.patch index 00000000,00000000..93155069 new file mode 100644 --- /dev/null +++ b/debian/patches/numba_fail_32bit.patch @@@ -1,0 -1,0 +1,52 @@@ ++Description: Allow some numba exceptions on 32 bit systems, warn on non-x86 ++ ++Specifying the exception type allows only explicit errors, ++not silently wrong answers ++ ++Numba has been observed to give wrong answers on mipsel ++and crash 
on ppc64el. ++ ++Author: Rebecca N. Palmer ++Forwarded: no ++ ++--- a/pandas/tests/window/test_numba.py +++++ b/pandas/tests/window/test_numba.py ++@@ -5,9 +5,15 @@ import pandas.util._test_decorators as t ++ ++ from pandas import Series ++ import pandas._testing as tm +++from pandas.compat import is_platform_32bit +++try: +++ from numba.core.errors import UnsupportedParforsError +++except ImportError: +++ UnsupportedParforsError = ImportError ++ ++ ++ @td.skip_if_no("numba", "0.46.0") +++@pytest.mark.xfail(condition=is_platform_32bit(), raises=UnsupportedParforsError, reason="some Numba functionality is not available on 32 bit systems", strict=False) ++ @pytest.mark.filterwarnings("ignore:\\nThe keyword argument") ++ # Filter warnings when parallel=True and the function can't be parallelized by Numba ++ class TestApply: ++--- a/pandas/core/window/numba_.py +++++ b/pandas/core/window/numba_.py ++@@ -6,7 +6,10 @@ import numpy as np ++ ++ from pandas._typing import Scalar ++ from pandas.compat._optional import import_optional_dependency ++- +++import platform +++import re +++import warnings +++warn_numba_platform = "Non-x86 system detected, Numba may give wrong results or crash" if not bool(re.match('i.?86|x86',platform.uname()[4])) else False ++ ++ def make_rolling_apply( ++ func: Callable[..., Scalar], ++@@ -37,6 +40,8 @@ def make_rolling_apply( ++ Numba function ++ """ ++ numba = import_optional_dependency("numba") +++ if warn_numba_platform: +++ warnings.warn(warn_numba_platform) ++ ++ if parallel: ++ loop_range = numba.prange diff --cc debian/patches/numpy119_compat.patch index 00000000,00000000..91fd42f0 new file mode 100644 --- /dev/null +++ b/debian/patches/numpy119_compat.patch @@@ -1,0 -1,0 +1,60 @@@ ++Description: Don't fail with Numpy 1.19 ++ ++Creating a nested DataFrame (which was already not recommended) ++via the constructor no longer works. ++Give a clearer error and xfail the tests. ++ ++Author: Rebecca N. Palmer ++Bug: https://github.com/pandas-dev/pandas/issues/32289 ++Bug-Debian: https://bugs.debian.org/963817 ++Forwarded: no ++ ++--- pandas-1.0.4+dfsg.orig/pandas/core/internals/construction.py +++++ pandas-1.0.4+dfsg/pandas/core/internals/construction.py ++@@ -292,7 +292,7 @@ def prep_ndarray(values, copy=True) -> n ++ if values.ndim == 1: ++ values = values.reshape((values.shape[0], 1)) ++ elif values.ndim != 2: ++- raise ValueError("Must pass 2-d input") +++ raise ValueError("Plain DataFrames must be 2-d - for higher dimensions use MultiIndex or the python3-xarray package. 
If you are trying to create a nested DataFrame (which is not recommended) see https://github.com/pandas-dev/pandas/issues/32289") ++ ++ return values ++ ++--- pandas-1.0.4+dfsg.orig/pandas/tests/frame/test_constructors.py +++++ pandas-1.0.4+dfsg/pandas/tests/frame/test_constructors.py ++@@ -145,7 +145,7 @@ class TestDataFrameConstructors: ++ assert df.loc[1, 0] is None ++ assert df.loc[0, 1] == "2" ++ ++- @pytest.mark.xfail(_is_numpy_dev, reason="Interprets list of frame as 3D") +++ @pytest.mark.xfail(condition=True,strict=False,raises=ValueError, reason="Interprets list of frame as 3D, https://github.com/pandas-dev/pandas/issues/32289") ++ def test_constructor_list_frames(self): ++ # see gh-3243 ++ result = DataFrame([DataFrame()]) ++@@ -477,7 +477,7 @@ class TestDataFrameConstructors: ++ DataFrame(index=[0], columns=range(0, 4), data=arr) ++ ++ # higher dim raise exception ++- with pytest.raises(ValueError, match="Must pass 2-d input"): +++ with pytest.raises(ValueError, match="Plain DataFrames must be 2-d"): ++ DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1]) ++ ++ # wrong size axis labels ++@@ -498,7 +498,7 @@ class TestDataFrameConstructors: ++ with pytest.raises(ValueError, match=msg): ++ DataFrame({"a": False, "b": True}) ++ ++- @pytest.mark.xfail(_is_numpy_dev, reason="Interprets embedded frame as 3D") +++ @pytest.mark.xfail(condition=True,strict=False,raises=ValueError, reason="Interprets embedded frame as 3D, https://github.com/pandas-dev/pandas/issues/32289") ++ def test_constructor_with_embedded_frames(self): ++ ++ # embedded data frames ++@@ -764,7 +764,7 @@ class TestDataFrameConstructors: ++ DataFrame(mat, columns=["A", "B"], index=[1, 2]) ++ ++ # higher dim raise exception ++- with pytest.raises(ValueError, match="Must pass 2-d input"): +++ with pytest.raises(ValueError, match="Plain DataFrames must be 2-d"): ++ DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1]) ++ ++ # automatic labeling diff --cc debian/patches/privacy.patch index 00000000,00000000..93a95ec3 new file mode 100644 --- /dev/null +++ b/debian/patches/privacy.patch @@@ -1,0 -1,0 +1,20 @@@ ++Description: Link to rather than embed Google calendar ++ ++Author: Rebecca N. Palmer ++Forwarded: not-needed ++ ++--- a/doc/source/development/meeting.rst +++++ b/doc/source/development/meeting.rst ++@@ -16,11 +16,7 @@ The minutes of past meetings are availab ++ Calendar ++ -------- ++ ++-This calendar shows all the developer meetings. ++- ++-.. raw:: html ++- ++- +++`This calendar `__ shows all the developer meetings. ++ ++ You can subscribe to this calendar with the following links: ++ diff --cc debian/patches/pyarrow_feather.patch index 00000000,00000000..b3015de2 new file mode 100644 --- /dev/null +++ b/debian/patches/pyarrow_feather.patch @@@ -1,0 -1,0 +1,46 @@@ ++Description: Skip feather tests if pyarrow not available ++ ++pandas now needs pyarrow.feather (not in Debian), ++not python3-feather-format, for read_feather/to_feather ++ ++Author: Rebecca N. 
Palmer ++Forwarded: accepted for 1.1 https://github.com/pandas-dev/pandas/pull/31144 ++ ++--- a/pandas/tests/io/test_common.py +++++ b/pandas/tests/io/test_common.py ++@@ -129,7 +129,7 @@ bar2,12,13,14,15 ++ (pd.read_csv, "os", FileNotFoundError, "csv"), ++ (pd.read_fwf, "os", FileNotFoundError, "txt"), ++ (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), ++- (pd.read_feather, "feather", Exception, "feather"), +++ (pd.read_feather, "pyarrow", Exception, "feather"), ++ (pd.read_hdf, "tables", FileNotFoundError, "h5"), ++ (pd.read_stata, "os", FileNotFoundError, "dta"), ++ (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), ++@@ -165,7 +165,7 @@ bar2,12,13,14,15 ++ (pd.read_table, "os", FileNotFoundError, "csv"), ++ (pd.read_fwf, "os", FileNotFoundError, "txt"), ++ (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), ++- (pd.read_feather, "feather", Exception, "feather"), +++ (pd.read_feather, "pyarrow", Exception, "feather"), ++ (pd.read_hdf, "tables", FileNotFoundError, "h5"), ++ (pd.read_stata, "os", FileNotFoundError, "dta"), ++ (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), ++@@ -212,7 +212,7 @@ bar2,12,13,14,15 ++ (pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")), ++ ( ++ pd.read_feather, ++- "feather", +++ "pyarrow", ++ ("io", "data", "feather", "feather-0_3_1.feather"), ++ ), ++ pytest.param( ++@@ -249,7 +249,7 @@ bar2,12,13,14,15 ++ [ ++ ("to_csv", {}, "os"), ++ ("to_excel", {"engine": "xlwt"}, "xlwt"), ++- ("to_feather", {}, "feather"), +++ ("to_feather", {}, "pyarrow"), ++ ("to_html", {}, "os"), ++ ("to_json", {}, "os"), ++ ("to_latex", {}, "os"), diff --cc debian/patches/remove_ccbysa_snippets.patch index 00000000,00000000..0f27bd38 new file mode 100644 --- /dev/null +++ b/debian/patches/remove_ccbysa_snippets.patch @@@ -1,0 -1,0 +1,242 @@@ ++Description: Remove code from Stack Overflow ++ ++Stack Overflow content is CC-BY-SA licensed, ++which this package is not supposed to be. These snippets may be ++too small to be copyrightable, but removing them to be safe. ++ ++https://lists.debian.org/debian-legal/2020/04/threads.html#00018 ++ ++Author: Rebecca N. Palmer ++Forwarded: no - deletes some tests/examples without replacement ++ ++--- /dev/null +++++ b/doc/source/user_guide/cookbook.rst ++@@ -0,0 +1,22 @@ +++.. _cookbook: +++ +++{{ header }} +++ +++.. _cookbook.idioms: +++.. _cookbook.selection: +++.. _cookbook.multi_index: +++.. _cookbook.missing_data: +++.. _cookbook.grouping: +++.. _cookbook.pivot: +++.. _cookbook.resample: +++.. _cookbook.merge: +++.. _cookbook.plotting: +++.. _cookbook.csv: +++.. _cookbook.csv.multiple_files: +++.. _cookbook.sql: +++.. _cookbook.excel: +++.. _cookbook.html: +++.. _cookbook.hdf: +++.. _cookbook.binary: +++ +++This page has been removed for copyright reasons. ++--- a/doc/source/user_guide/index.rst +++++ b/doc/source/user_guide/index.rst ++@@ -42,4 +42,3 @@ Further information on any specific meth ++ scale ++ sparse ++ gotchas ++- cookbook ++--- a/pandas/io/sql.py +++++ b/pandas/io/sql.py ++@@ -1401,14 +1401,14 @@ def _get_valid_sqlite_name(name): ++ # Replace all " with "". ++ # Wrap the entire thing in double quotes. 
++ ++- uname = _get_unicode_name(name) ++- if not len(uname): +++ name = _get_unicode_name(name) +++ if not len(name): ++ raise ValueError("Empty table or column name specified") ++ ++- nul_index = uname.find("\x00") ++- if nul_index >= 0: +++ if '\0' in name: ++ raise ValueError("SQLite identifier cannot contain NULs") ++- return '"' + uname.replace('"', '""') + '"' +++ name = name.replace('"', '""') +++ return '"' + name + '"' ++ ++ ++ _SAFE_NAMES_WARNING = ( ++--- a/pandas/tests/groupby/aggregate/test_other.py +++++ b/pandas/tests/groupby/aggregate/test_other.py ++@@ -24,30 +24,6 @@ from pandas.core.base import Specificati ++ from pandas.io.formats.printing import pprint_thing ++ ++ ++-def test_agg_api(): ++- # GH 6337 ++- # https://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error ++- # different api for agg when passed custom function with mixed frame ++- ++- df = DataFrame( ++- { ++- "data1": np.random.randn(5), ++- "data2": np.random.randn(5), ++- "key1": ["a", "a", "b", "b", "a"], ++- "key2": ["one", "two", "one", "two", "one"], ++- } ++- ) ++- grouped = df.groupby("key1") ++- ++- def peak_to_peak(arr): ++- return arr.max() - arr.min() ++- ++- expected = grouped.agg([peak_to_peak]) ++- expected.columns = ["data1", "data2"] ++- result = grouped.agg(peak_to_peak) ++- tm.assert_frame_equal(result, expected) ++- ++- ++ def test_agg_datetimes_mixed(): ++ data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]] ++ ++--- a/pandas/tests/groupby/test_categorical.py +++++ b/pandas/tests/groupby/test_categorical.py ++@@ -796,29 +796,6 @@ def test_groupby_empty_with_category(): ++ tm.assert_series_equal(result, expected) ++ ++ ++-def test_sort(): ++- ++- # https://stackoverflow.com/questions/23814368/sorting-pandas- ++- # categorical-labels-after-groupby ++- # This should result in a properly sorted Series so that the plot ++- # has a sorted x axis ++- # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') ++- ++- df = DataFrame({"value": np.random.randint(0, 10000, 100)}) ++- labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)] ++- cat_labels = Categorical(labels, labels) ++- ++- df = df.sort_values(by=["value"], ascending=True) ++- df["value_group"] = pd.cut( ++- df.value, range(0, 10500, 500), right=False, labels=cat_labels ++- ) ++- ++- res = df.groupby(["value_group"], observed=False)["value_group"].count() ++- exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] ++- exp.index = CategoricalIndex(exp.index, name=exp.index.name) ++- tm.assert_series_equal(res, exp) ++- ++- ++ def test_sort2(): ++ # dataframe groupby sort was being ignored # GH 8868 ++ df = DataFrame( ++--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py +++++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py ++@@ -6,27 +6,6 @@ import pandas._testing as tm ++ import pandas.core.common as com ++ ++ ++-def test_detect_chained_assignment(): ++- # Inplace ops, originally from: ++- # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug ++- a = [12, 23] ++- b = [123, None] ++- c = [1234, 2345] ++- d = [12345, 23456] ++- tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")] ++- events = { ++- ("eyes", "left"): a, ++- ("eyes", "right"): b, ++- ("ears", "left"): c, ++- ("ears", "right"): d, ++- } ++- multiind = MultiIndex.from_tuples(tuples, names=["part", "side"]) ++- zed = DataFrame(events, index=["a", "b"], columns=multiind) ++- 
++- with pytest.raises(com.SettingWithCopyError): ++- zed["eyes"]["right"].fillna(value=555, inplace=True) ++- ++- ++ def test_cache_updating(): ++ # 5216 ++ # make sure that we don't try to set a dead cache ++--- a/pandas/tests/indexing/multiindex/test_setitem.py +++++ b/pandas/tests/indexing/multiindex/test_setitem.py ++@@ -140,35 +140,7 @@ class TestMultiIndexSetItem: ++ with pytest.raises(TypeError): ++ df.loc["bar"] *= 2 ++ ++- # from SO ++- # https://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation ++- df_orig = DataFrame.from_dict( ++- { ++- "price": { ++- ("DE", "Coal", "Stock"): 2, ++- ("DE", "Gas", "Stock"): 4, ++- ("DE", "Elec", "Demand"): 1, ++- ("FR", "Gas", "Stock"): 5, ++- ("FR", "Solar", "SupIm"): 0, ++- ("FR", "Wind", "SupIm"): 0, ++- } ++- } ++- ) ++- df_orig.index = MultiIndex.from_tuples( ++- df_orig.index, names=["Sit", "Com", "Type"] ++- ) ++ ++- expected = df_orig.copy() ++- expected.iloc[[0, 2, 3]] *= 2 ++- ++- idx = pd.IndexSlice ++- df = df_orig.copy() ++- df.loc[idx[:, :, "Stock"], :] *= 2 ++- tm.assert_frame_equal(df, expected) ++- ++- df = df_orig.copy() ++- df.loc[idx[:, :, "Stock"], "price"] *= 2 ++- tm.assert_frame_equal(df, expected) ++ ++ def test_multiindex_assignment(self): ++ ++--- a/pandas/tests/indexing/test_chaining_and_caching.py +++++ b/pandas/tests/indexing/test_chaining_and_caching.py ++@@ -272,14 +272,6 @@ class TestChaining: ++ df["column1"] = df["column1"] + "c" ++ str(df) ++ ++- # from SO: ++- # https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc ++- df = DataFrame(np.arange(0, 9), columns=["count"]) ++- df["group"] = "b" ++- ++- with pytest.raises(com.SettingWithCopyError): ++- df.iloc[0:5]["group"] = "a" ++- ++ # Mixed type setting but same dtype & changing dtype ++ df = DataFrame( ++ dict( ++--- a/pandas/tests/io/parser/test_common.py +++++ b/pandas/tests/io/parser/test_common.py ++@@ -1057,24 +1057,6 @@ def test_trailing_delimiters(all_parsers ++ tm.assert_frame_equal(result, expected) ++ ++ ++-def test_escapechar(all_parsers): ++- # https://stackoverflow.com/questions/13824840/feature-request-for- ++- # pandas-read-csv ++- data = '''SEARCH_TERM,ACTUAL_URL ++-"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" ++-"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" ++-"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa ++- ++- parser = all_parsers ++- result = parser.read_csv( ++- StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8" ++- ) ++- ++- assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie' ++- ++- tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"])) ++- ++- ++ def test_int64_min_issues(all_parsers): ++ # see gh-2599 ++ parser = all_parsers diff --cc debian/patches/series index 00000000,00000000..2a0f552f new file mode 100644 --- /dev/null +++ b/debian/patches/series @@@ -1,0 -1,0 +1,32 @@@ ++deb_nonversioneer_version.patch ++deb_doc_donotoverride_PYTHONPATH.patch ++xfail_tests_nonintel_io.patch ++deb_disable_googleanalytics.patch ++deb_no_strict_data.patch ++mathjax-path.patch ++xfail_tests_nonintel_nannat.patch 
++skip_noencoding_locales.patch ++use_system_intersphinx.patch ++fix_shebangs.patch ++pyarrow_feather.patch ++contributor_list_not_in_tarball.patch ++tests_dont_assume_endian.patch ++fix_random_seeds.patch ++matplotlib32_compat.patch ++xfail_c_locale.patch ++sphinx_no_pandas_theme.patch ++spelling.patch ++privacy.patch ++link_security.patch ++find_test_data.patch ++fix_is_warnings.patch ++stable_test_urls.patch ++remove_ccbysa_snippets.patch ++test_promote_32bit.patch ++numba_fail_32bit.patch ++numpy119_compat.patch ++hurd_compat.patch ++matplotlib33_compat.patch ++968208_rounding.patch ++disable_pytest_asyncio.patch ++skip_crashing_test.patch diff --cc debian/patches/skip_crashing_test.patch index 00000000,00000000..3e15cfed new file mode 100644 --- /dev/null +++ b/debian/patches/skip_crashing_test.patch @@@ -1,0 -1,0 +1,16 @@@ ++Description: Skip a test that crashes on s390x ++ ++Author: Rebecca N. Palmer ++Forwarded: no ++ ++--- pandas-1.0.5+dfsg.orig/pandas/tests/window/test_apply.py +++++ pandas-1.0.5+dfsg/pandas/tests/window/test_apply.py ++@@ -13,6 +13,8 @@ def test_rolling_apply_invalid_raw(bad_r ++ Series(range(3)).rolling(1).apply(len, raw=bad_raw) ++ ++ +++from pandas.compat import is_platform_little_endian +++@pytest.mark.skipif(not is_platform_little_endian(), reason="segfaults on s390x") ++ def test_rolling_apply_out_of_bounds(engine_and_raw): ++ # gh-1850 ++ engine, raw = engine_and_raw diff --cc debian/patches/skip_noencoding_locales.patch index 00000000,00000000..42af90e3 new file mode 100644 --- /dev/null +++ b/debian/patches/skip_noencoding_locales.patch @@@ -1,0 -1,0 +1,25 @@@ ++Description: Don't test datetime in locales with no encoding ++ ++Some datetime tests run the test in every available locale. ++If this set includes locales without an encoding (currently dsb_DE ++and sah_RU), it fails due to Python bug ++https://bugs.python.org/issue20088 ++ ++Failure log ++https://tests.reproducible-builds.org/debian/rbuild/buster/amd64/pandas_0.23.3+dfsg-3.rbuild.log.gz ++ ++Author: Rebecca N. Palmer ++Bug: https://github.com/pandas-dev/pandas/issues/20957 ++Forwarded: no ++ ++--- a/pandas/_config/localization.py +++++ b/pandas/_config/localization.py ++@@ -99,6 +99,8 @@ def _valid_locales(locales, normalize): ++ ++ def _default_locale_getter(): ++ raw_locales = subprocess.check_output(["locale -a"], shell=True) +++ # skip locales without encoding, to avoid Python bug https://bugs.python.org/issue20088 +++ raw_locales = raw_locales.replace(b'\ndsb_DE\n',b'\n').replace(b'\nsah_RU\n',b'\n').replace(b'\ncrh_UA\n',b'\n') ++ return raw_locales ++ ++ diff --cc debian/patches/spelling.patch index 00000000,00000000..654de657 new file mode 100644 --- /dev/null +++ b/debian/patches/spelling.patch @@@ -1,0 -1,0 +1,39 @@@ ++Description: Fix typos ++ ++Author: Rebecca N. Palmer ++Origin: lintian ++Forwarded: not-needed (already fixed) ++ ++--- a/pandas/_libs/lib.pyx +++++ b/pandas/_libs/lib.pyx ++@@ -2012,7 +2012,7 @@ def maybe_convert_objects(ndarray[object ++ If an array-like object contains only timedelta values or NaT is ++ encountered, whether to convert and return an array of m8[ns] dtype. ++ convert_to_nullable_integer : bool, default False ++- If an array-like object contains only interger values (and NaN) is +++ If an array-like object contains only integer values (and NaN) is ++ encountered, whether to convert and return an IntegerArray. 
++ ++ Returns ++--- a/pandas/core/arrays/boolean.py +++++ b/pandas/core/arrays/boolean.py ++@@ -390,7 +390,7 @@ class BooleanArray(BaseMaskedArray): ++ Returns ++ ------- ++ array : ndarray or ExtensionArray ++- NumPy ndarray, BooleanArray or IntergerArray with 'dtype' for its dtype. +++ NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype. ++ ++ Raises ++ ------ ++--- a/pandas/core/arrays/integer.py +++++ b/pandas/core/arrays/integer.py ++@@ -439,7 +439,7 @@ class IntegerArray(BaseMaskedArray): ++ Returns ++ ------- ++ array : ndarray or IntegerArray ++- NumPy ndarray or IntergerArray with 'dtype' for its dtype. +++ NumPy ndarray or IntegerArray with 'dtype' for its dtype. ++ ++ Raises ++ ------ diff --cc debian/patches/sphinx_no_pandas_theme.patch index 00000000,00000000..6ea05d89 new file mode 100644 --- /dev/null +++ b/debian/patches/sphinx_no_pandas_theme.patch @@@ -1,0 -1,0 +1,18 @@@ ++Description: Don't require the pandas theme ++ ++(TODO - package it - here as a multi-upstream-tarball package?) ++ ++Author: Rebecca N. Palmer ++Forwarded: not-needed ++ ++--- a/doc/source/conf.py +++++ b/doc/source/conf.py ++@@ -196,7 +196,7 @@ pygments_style = "sphinx" ++ ++ # The theme to use for HTML and HTML Help pages. Major themes that come with ++ # Sphinx are currently 'default' and 'sphinxdoc'. ++-html_theme = "pydata_sphinx_theme" +++html_theme = "nature" ++ ++ # The style sheet to use for HTML and HTML Help pages. A file of that name ++ # must exist either in Sphinx' static/ path, or in one of the custom paths diff --cc debian/patches/stable_test_urls.patch index 00000000,00000000..1566a084 new file mode 100644 --- /dev/null +++ b/debian/patches/stable_test_urls.patch @@@ -1,0 -1,0 +1,55 @@@ ++Description: Use test URLs that are less likely to disappear ++ ++Avoid 404 errors in stable when upstream reorganize the test data ++(happened to two of these in 0.25 -> 1.0). ++ ++It is _not_ necessary to update the tag version on every package release, ++only if these tests fail because they expect moved/changed data. ++ ++Author: Rebecca N. 
Palmer ++Forwarded: not-needed ++ ++--- a/pandas/tests/io/excel/test_readers.py +++++ b/pandas/tests/io/excel/test_readers.py ++@@ -563,7 +563,7 @@ class TestReaders: ++ @tm.network ++ def test_read_from_http_url(self, read_ext): ++ url = ( ++- "https://raw.githubusercontent.com/pandas-dev/pandas/master/" +++ "https://raw.githubusercontent.com/pandas-dev/pandas/v1.0.3/" ++ "pandas/tests/io/data/excel/test1" + read_ext ++ ) ++ url_table = pd.read_excel(url) ++--- a/pandas/tests/io/parser/test_common.py +++++ b/pandas/tests/io/parser/test_common.py ++@@ -910,7 +910,7 @@ def test_url(all_parsers, csv_dir_path): ++ kwargs = dict(sep="\t") ++ ++ url = ( ++- "https://raw.github.com/pandas-dev/pandas/master/" +++ "https://github.com/pandas-dev/pandas/raw/v1.0.3/" ++ "pandas/tests/io/parser/data/salaries.csv" ++ ) ++ url_result = parser.read_csv(url, **kwargs) ++--- a/pandas/tests/io/parser/test_network.py +++++ b/pandas/tests/io/parser/test_network.py ++@@ -32,7 +32,7 @@ def check_compressed_urls(salaries_table ++ # test reading compressed urls with various engines and ++ # extension inference ++ base_url = ( ++- "https://github.com/pandas-dev/pandas/raw/master/" +++ "https://github.com/pandas-dev/pandas/raw/v1.0.3/" ++ "pandas/tests/io/parser/data/salaries.csv" ++ ) ++ ++--- a/pandas/tests/io/test_html.py +++++ b/pandas/tests/io/test_html.py ++@@ -134,7 +134,7 @@ class TestReadHtml: ++ @tm.network ++ def test_spam_url(self): ++ url = ( ++- "https://raw.githubusercontent.com/pandas-dev/pandas/master/" +++ "https://raw.githubusercontent.com/pandas-dev/pandas/v1.0.3/" ++ "pandas/tests/io/data/html/spam.html" ++ ) ++ df1 = self.read_html(url, ".*Water.*") diff --cc debian/patches/test_promote_32bit.patch index 00000000,00000000..8997ba08 new file mode 100644 --- /dev/null +++ b/debian/patches/test_promote_32bit.patch @@@ -1,0 -1,0 +1,37 @@@ ++Description: Don't fail when np.intc != np.int32 ++ ++np.intc (C int), np.int_ (C long) and np.longlong (C long long) ++are always distinct type objects, but only two of them are ++actually different sizes; np.int32 and np.int64 are aliases ++https://sources.debian.org/src/numpy/1:1.18.4-1/numpy/core/_type_aliases.py/#L110 ++ ++Author: Rebecca N. Palmer ++Bug: https://github.com/pandas-dev/pandas/issues/31856 ++Forwarded: not-needed https://github.com/pandas-dev/pandas/pull/33729/commits/16600575b4a19ceb7ceabbec6992e932e797e109 ++ ++--- a/pandas/tests/dtypes/cast/test_promote.py +++++ b/pandas/tests/dtypes/cast/test_promote.py ++@@ -96,19 +96,10 @@ def _check_promote(dtype, fill_value, ex ++ ++ def _assert_match(result_fill_value, expected_fill_value): ++ # GH#23982/25425 require the same type in addition to equality/NA-ness ++- res_type = type(result_fill_value) ++- ex_type = type(expected_fill_value) ++- if res_type.__name__ == "uint64": ++- # No idea why, but these (sometimes) do not compare as equal ++- assert ex_type.__name__ == "uint64" ++- elif res_type.__name__ == "ulonglong": ++- # On some builds we get this instead of np.uint64 ++- # Note: cant check res_type.dtype.itemsize directly on numpy 1.18 ++- assert res_type(0).itemsize == 8 ++- assert ex_type == res_type or ex_type == np.uint64 ++- else: ++- # On some builds, type comparison fails, e.g. np.int32 != np.int32 ++- assert res_type == ex_type or res_type.__name__ == ex_type.__name__ +++ # use np.dtype to treat "different" scalar types +++ # with the same data layout as equal +++ # e.g. 
np.intc and np.int32, np.ulonglong and np.uint64 +++ assert np.dtype(type(result_fill_value)) == np.dtype(type(expected_fill_value)) ++ ++ match_value = result_fill_value == expected_fill_value ++ diff --cc debian/patches/tests_dont_assume_endian.patch index 00000000,00000000..3c4a76e0 new file mode 100644 --- /dev/null +++ b/debian/patches/tests_dont_assume_endian.patch @@@ -1,0 -1,0 +1,81 @@@ ++Description: Fix or skip tests that assume the wrong endianness ++ ++This is a bug in the tests not pandas itself - ++the expected values explicitly specify little-endian ++ ++Author: Rebecca N. Palmer ++Forwarded: no ++ ++--- a/pandas/tests/frame/methods/test_to_records.py +++++ b/pandas/tests/frame/methods/test_to_records.py ++@@ -5,6 +5,7 @@ import pytest ++ ++ from pandas import CategoricalDtype, DataFrame, MultiIndex, Series, date_range ++ import pandas._testing as tm +++from pandas.compat import is_platform_little_endian ++ ++ ++ class TestDataFrameToRecords: ++@@ -239,6 +240,7 @@ class TestDataFrameToRecords: ++ ), ++ ], ++ ) +++ @pytest.mark.xfail(condition=not is_platform_little_endian(),reason="expected values assume little-endian",strict=False) ++ def test_to_records_dtype(self, kwargs, expected): ++ # see GH#18146 ++ df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]}) ++@@ -312,11 +314,13 @@ class TestDataFrameToRecords: ++ ), ++ ], ++ ) +++ @pytest.mark.xfail(condition=not is_platform_little_endian(),reason="expected values assume little-endian",strict=False) ++ def test_to_records_dtype_mi(self, df, kwargs, expected): ++ # see GH#18146 ++ result = df.to_records(**kwargs) ++ tm.assert_almost_equal(result, expected) ++ +++ @pytest.mark.xfail(condition=not is_platform_little_endian(),reason="expected values assume little-endian",strict=False) ++ def test_to_records_dict_like(self): ++ # see GH#18146 ++ class DictLike: ++--- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++++ b/pandas/tests/scalar/timedelta/test_arithmetic.py ++@@ -272,7 +272,7 @@ class TestTimedeltaAdditionSubtraction: ++ tm.assert_numpy_array_equal(np.array([2]) * td, expected) ++ msg = ( ++ "ufunc '?multiply'? cannot use operands with types" ++- r" dtype\(']m8\[ns\]'\) and dtype\('[<>]m8\[ns\]'\)" ++ ) ++ with pytest.raises(TypeError, match=msg): ++ td * other ++--- a/pandas/tests/io/parser/test_c_parser_only.py +++++ b/pandas/tests/io/parser/test_c_parser_only.py ++@@ -129,7 +129,7 @@ nan 2 ++ "the dtype timedelta64 is not supported for parsing", ++ dict(dtype={"A": "timedelta64", "B": "float64"}), ++ ), ++- ("the dtype ]U8 is not supported for parsing", dict(dtype={"A": "U8"})), ++ ], ++ ids=["dt64-0", "dt64-1", "td64", "U5") ++ tm.assert_numpy_array_equal(result, expected) ++ ++ # no missing values -> can convert to bool, otherwise raises diff --cc debian/patches/use_system_intersphinx.patch index 00000000,00000000..a667e84e new file mode 100644 --- /dev/null +++ b/debian/patches/use_system_intersphinx.patch @@@ -1,0 -1,0 +1,29 @@@ ++Description: Use packaged intersphinx indexes ++ ++Author: Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/876417 ++Forwarded: not-needed ++ ++--- a/doc/source/conf.py +++++ b/doc/source/conf.py ++@@ -409,13 +409,13 @@ latex_documents = [ ++ if pattern is None: ++ intersphinx_mapping = { ++ "dateutil": ("https://dateutil.readthedocs.io/en/latest/", None), ++- "matplotlib": ("https://matplotlib.org/", None), ++- "numpy": ("https://numpy.org/doc/stable/", None), ++- "pandas-gbq": ("https://pandas-gbq.readthedocs.io/en/latest/", None), ++- "py": ("https://pylib.readthedocs.io/en/latest/", None), ++- "python": ("https://docs.python.org/3/", None), ++- "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), ++- "statsmodels": ("https://www.statsmodels.org/devel/", None), +++ "matplotlib": ("https://matplotlib.org/", "/usr/share/doc/python-matplotlib-doc/html/objects.inv"), +++ "numpy": ("https://numpy.org/doc/stable/", "/usr/share/doc/python-numpy-doc/html/objects.inv"), +++ "pandas-gbq": ("https://pandas-gbq.readthedocs.io/en/latest/", None), # not in Debian +++ "py": ("https://pylib.readthedocs.io/en/latest/", None), # no -doc in Debian +++ "python": ("https://docs.python.org/3/", "/usr/share/doc/python3-doc/html/objects.inv"), +++ "scipy": ("https://docs.scipy.org/doc/scipy/reference/", ("/usr/share/doc/python-scipy-doc/html/objects.inv","/usr/share/doc/python-scipy/html/objects.inv")), +++ "statsmodels": ("https://www.statsmodels.org/devel/", "/usr/share/doc/python-statsmodels-doc/html/objects.inv"), ++ } ++ ++ # extlinks alias diff --cc debian/patches/xfail_c_locale.patch index 00000000,00000000..518704af new file mode 100644 --- /dev/null +++ b/debian/patches/xfail_c_locale.patch @@@ -1,0 -1,0 +1,15 @@@ ++Description: Xfail a test that doesn't work in the C locale ++ ++Author: Rebecca N. Palmer ++Forwarded: no ++ ++--- a/pandas/tests/config/test_localization.py +++++ b/pandas/tests/config/test_localization.py ++@@ -95,6 +95,7 @@ def test_set_locale(lang, enc): ++ assert current_locale == _current_locale ++ ++ +++@pytest.mark.xfail(strict=False,reason="fails in C locale") ++ def test_encoding_detected(): ++ system_locale = os.environ.get("LC_ALL") ++ system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8" diff --cc debian/patches/xfail_tests_nonintel_io.patch index 00000000,00000000..a89a00f6 new file mode 100644 --- /dev/null +++ b/debian/patches/xfail_tests_nonintel_io.patch @@@ -1,0 -1,0 +1,244 @@@ ++Description: HDF5 and Stata I/O are broken on some architectures ++ ++Fix some issues, warn on use and xfail tests for the remainder ++ ++HDF5 and Stata are known to fail on big-endian architectures ++Stata also fails on qemu-ppc64el, but not real ppc64el ++ ++In 0.25.3 HDF5 _crashes_ on armhf, so skip ++(pytest-forked allows continuing past a crash, ++but still seems to fail on xfailed crashes) ++ ++Author: Andreas Tille , Graham Inggs , Yaroslav Halchenko , Rebecca N. 
Palmer ++Bug-Debian: https://bugs.debian.org/877419 ++Forwarded: no ++ ++--- a/pandas/io/pytables.py +++++ b/pandas/io/pytables.py ++@@ -20,6 +20,10 @@ from typing import ( ++ Union, ++ ) ++ import warnings +++import platform +++import re +++from pandas.compat import is_platform_little_endian +++warn_hdf_platform = "Non-x86 system detected, HDF(5) format I/O may give wrong results (particularly on files created with older versions) or crash - https://bugs.debian.org/877419" if not bool(re.match('i.?86|x86',platform.uname()[4])) else False ++ ++ import numpy as np ++ ++@@ -511,6 +515,8 @@ class HDFStore: ++ fletcher32: bool = False, ++ **kwargs, ++ ): +++ if warn_hdf_platform: +++ warnings.warn(warn_hdf_platform) ++ ++ if "format" in kwargs: ++ raise ValueError("format is not a defined argument for HDFStore") ++@@ -724,7 +730,10 @@ class HDFStore: ++ self._handle.flush() ++ if fsync: ++ try: ++- os.fsync(self._handle.fileno()) +++ if is_platform_little_endian(): +++ os.fsync(self._handle.fileno()) +++ else: +++ os.sync() # due to a pytables bad-cast bug, fileno is invalid on 64-bit big-endian ++ except OSError: ++ pass ++ ++--- a/pandas/io/stata.py +++++ b/pandas/io/stata.py ++@@ -17,6 +17,9 @@ import struct ++ import sys ++ from typing import Any, Dict, Hashable, Optional, Sequence ++ import warnings +++import platform +++import re +++warn_stata_platform = "Non-x86 system detected, Stata format I/O may give wrong results (particularly on strings) - https://bugs.debian.org/877419" if not bool(re.match('i.?86|x86',platform.uname()[4])) else False ++ ++ from dateutil.relativedelta import relativedelta ++ import numpy as np ++@@ -855,6 +858,8 @@ class StataParser: ++ # NOTE: the byte type seems to be reserved for categorical variables ++ # with a label, but the underlying variable is -127 to 100 ++ # we're going to drop the label and cast to int +++ if warn_stata_platform: +++ warnings.warn(warn_stata_platform) ++ self.DTYPE_MAP = dict( ++ list(zip(range(1, 245), ["a" + str(i) for i in range(1, 245)])) ++ + [ ++--- a/pandas/tests/io/pytables/test_store.py +++++ b/pandas/tests/io/pytables/test_store.py ++@@ -54,6 +54,11 @@ from pandas.io.pytables import ( ++ ++ from pandas.io import pytables as pytables # noqa: E402 isort:skip ++ from pandas.io.pytables import TableIterator # noqa: E402 isort:skip +++import platform +++import re +++import sys +++is_crashing_arch=bool((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and sys.maxsize<=2**32) # meant for armhf, though this form will also skip on armel - uname = kernel arch +++pytestmark = pytest.mark.forked ++ ++ ++ _default_compressor = "blosc" ++@@ -1013,6 +1018,7 @@ class TestHDFStore: ++ check("table", index) ++ check("fixed", index) ++ +++ @pytest.mark.skipif(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False) ++ @pytest.mark.skipif( ++ not is_platform_little_endian(), reason="reason platform is not little endian" ++ ) ++@@ -1045,6 +1051,7 @@ class TestHDFStore: ++ ], ++ ) ++ @pytest.mark.parametrize("dtype", ["category", object]) +++ @pytest.mark.skipif(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False) ++ def test_latin_encoding(self, setup_path, dtype, val): ++ enc = "latin-1" ++ nan_rep = "" ++@@ -1241,6 +1248,7 @@ class TestHDFStore: ++ # still read from it. 
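# (Illustrative aside, not part of the patch: the "non-x86 system" detection
#  added to pandas/io/pytables.py and pandas/io/stata.py above is just a regex
#  over the machine field of uname, roughly as sketched here. Only the regex
#  and the warning text come from the patch; the rest is an assumption.)
import platform
import re
import warnings

machine = platform.uname()[4]  # e.g. "x86_64", "i686", "s390x", "armv7l"
if not re.match("i.?86|x86", machine):
    warnings.warn(
        "Non-x86 system detected, HDF(5) format I/O may give wrong results "
        "(particularly on files created with older versions) or crash "
        "- https://bugs.debian.org/877419"
    )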
++ pd.read_hdf(store, "k1") ++ +++ @pytest.mark.skipif(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False) ++ def test_append_frame_column_oriented(self, setup_path): ++ with ensure_clean_store(setup_path) as store: ++ ++@@ -3804,6 +3812,7 @@ class TestHDFStore: ++ df.iloc[3:5, 1:3] = np.nan ++ df.iloc[8:10, -2] = np.nan ++ +++ @pytest.mark.skipif(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False) ++ def test_select_filter_corner(self, setup_path): ++ ++ df = DataFrame(np.random.randn(50, 100)) ++@@ -4060,6 +4069,7 @@ class TestHDFStore: ++ assert isinstance(d1, DataFrame) ++ ++ @td.xfail_non_writeable +++ @pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError) ++ def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path): ++ # GH 24510 ++ # legacy table with fixed format written in Python 2 ++@@ -4740,6 +4750,7 @@ class TestHDFStore: ++ with pd.HDFStore(path) as store: ++ assert os.fspath(store) == str(path) ++ +++ @pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError) ++ def test_read_py2_hdf_file_in_py3(self, datapath): ++ # GH 16781 ++ ++--- a/pandas/tests/io/test_stata.py +++++ b/pandas/tests/io/test_stata.py ++@@ -25,6 +25,8 @@ from pandas.io.stata import ( ++ read_stata, ++ ) ++ +++from pandas.compat import is_platform_little_endian +++pytestmark = pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of test_stata on non-little endian",strict=False) ++ ++ @pytest.fixture() ++ def mixed_frame(): ++@@ -199,7 +201,7 @@ class TestStata: ++ # parsed_113 = self.read_dta(self.dta2_113) ++ ++ # Remove resource warnings ++- w = [x for x in w if x.category is UserWarning] +++ w = [x for x in w if x.category is UserWarning and not "Non-x86 system detected" in str(x.message)] ++ ++ # should get warning for each call to read_dta ++ assert len(w) == 3 ++@@ -452,7 +454,7 @@ class TestStata: ++ warnings.simplefilter("always", InvalidColumnName) ++ original.to_stata(path, None, version=version) ++ # should get a warning for that format. 
++- assert len(w) == 1 +++ assert len([x for x in w if not "Non-x86 system detected" in str(x.message)]) == 1 ++ ++ written_and_read_again = self.read_dta(path) ++ tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted) ++@@ -1747,8 +1749,9 @@ has been incorrectly encoded by Stata or ++ the string values returned are correct.""" ++ with tm.assert_produces_warning(UnicodeWarning) as w: ++ encoded = read_stata(self.dta_encoding_118) ++- assert len(w) == 151 ++- assert w[0].message.args[0] == msg +++ w2 = [x for x in w if not "Non-x86 system detected" in str(x.message)] +++ assert len(w2) == 151 +++ assert w2[0].message.args[0] == msg ++ ++ expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"]) ++ tm.assert_frame_equal(encoded, expected) ++--- a/pandas/_testing.py +++++ b/pandas/_testing.py ++@@ -5,6 +5,8 @@ from datetime import datetime ++ from functools import wraps ++ import gzip ++ import os +++import platform +++import re ++ from shutil import rmtree ++ import string ++ import tempfile ++@@ -2481,6 +2483,8 @@ def assert_produces_warning( ++ ) ++ assert actual_warning.filename == caller.filename, msg ++ else: +++ if actual_warning.category==UserWarning and "Non-x86 system detected" in str(actual_warning.message) and not bool(re.match('i.?86|x86',platform.uname()[4])): +++ continue ++ extra_warnings.append( ++ ( ++ actual_warning.category.__name__, ++--- a/pandas/tests/io/pytables/test_timezones.py +++++ b/pandas/tests/io/pytables/test_timezones.py ++@@ -4,6 +4,7 @@ import numpy as np ++ import pytest ++ ++ import pandas.util._test_decorators as td +++from pandas.compat import is_platform_little_endian ++ ++ import pandas as pd ++ from pandas import DataFrame, DatetimeIndex, Series, Timestamp, date_range ++@@ -322,6 +323,7 @@ def test_store_timezone(setup_path): ++ tm.assert_frame_equal(result, df) ++ ++ +++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError) ++ def test_legacy_datetimetz_object(datapath, setup_path): ++ # legacy from < 0.17.0 ++ # 8260 ++@@ -372,6 +374,7 @@ def test_read_with_where_tz_aware_index( ++ tm.assert_frame_equal(result, expected) ++ ++ +++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError) ++ def test_py2_created_with_datetimez(datapath, setup_path): ++ # The test HDF5 file was created in Python 2, but could not be read in ++ # Python 3. 
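# (Illustrative aside, not part of the patch: the next hunk turns a plain
#  parametrize entry into pytest.param(..., marks=...) so that only that one
#  case is xfailed on big-endian machines. A minimal self-contained sketch,
#  assuming pandas.compat.is_platform_little_endian() is essentially
#  sys.byteorder == "little":)
import sys
import pytest

little_endian = sys.byteorder == "little"

@pytest.mark.parametrize(
    "value",
    [
        1,
        pytest.param(
            2,
            marks=pytest.mark.xfail(
                condition=not little_endian,
                reason="expected values assume little-endian",
                strict=False,
            ),
        ),
    ],
)
def test_example(value):
    assert value in (1, 2)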
++--- a/pandas/tests/io/test_common.py +++++ b/pandas/tests/io/test_common.py ++@@ -8,7 +8,7 @@ from pathlib import Path ++ ++ import pytest ++ ++-from pandas.compat import is_platform_windows +++from pandas.compat import is_platform_windows, is_platform_little_endian ++ import pandas.util._test_decorators as td ++ ++ import pandas as pd ++@@ -215,11 +215,11 @@ bar2,12,13,14,15 ++ "feather", ++ ("io", "data", "feather", "feather-0_3_1.feather"), ++ ), ++- ( +++ pytest.param( ++ pd.read_hdf, ++ "tables", ++ ("io", "data", "legacy_hdf", "datetimetz_object.h5"), ++- ), +++ marks=pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)), ++ (pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")), ++ (pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")), ++ (pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")), diff --cc debian/patches/xfail_tests_nonintel_nannat.patch index 00000000,00000000..7045bfd6 new file mode 100644 --- /dev/null +++ b/debian/patches/xfail_tests_nonintel_nannat.patch @@@ -1,0 -1,0 +1,194 @@@ ++Description: Xfail NaN <-> NaT tests on non-x86 and warn on cast ++ ++pd.Series([np.nan]).astype('datetime64[ns]')[0] = pd.NaT on x86 ++but 1970-01-01 on arm* because float NaN -> int is undefined: ++https://github.com/numpy/numpy/issues/8325 ++https://github.com/pandas-dev/pandas/issues/17792 ++https://github.com/pandas-dev/pandas/issues/26964 ++ ++On s390x it's the maximum _positive_ value (2**63-1 ns = year 2262) ++ ++Author: Andreas Tille , Graham Inggs , Rebecca N. Palmer ++Bug-Debian: https://bugs.debian.org/877754 ++Forwarded: no ++ ++--- a/pandas/core/dtypes/cast.py +++++ b/pandas/core/dtypes/cast.py ++@@ -1,6 +1,10 @@ ++ """ routings for casting """ ++ ++ from datetime import datetime, timedelta +++import warnings +++import platform +++import re +++warn_nannat_platform = "Non-x86 system detected, float -> datetime/timedelta may not handle NaNs correctly - https://bugs.debian.org/877754" if not bool(re.match('i.?86|x86',platform.uname()[4])) else False ++ ++ import numpy as np ++ ++@@ -891,6 +895,8 @@ def astype_nansafe(arr, dtype, copy: boo ++ f"'{dtype.name}[ns]' instead." ++ ) ++ raise ValueError(msg) +++ if warn_nannat_platform and (is_datetime64_dtype(dtype) or is_timedelta64_dtype(dtype)) and np.issubdtype(arr.dtype, np.floating) and not np.isfinite(arr).all(): +++ warnings.warn(warn_nannat_platform) ++ ++ if copy or is_object_dtype(arr) or is_object_dtype(dtype): ++ # Explicit copy, or required since NumPy can't view from / to object. ++@@ -1263,6 +1269,8 @@ def maybe_cast_to_datetime(value, dtype, ++ value = iNaT ++ else: ++ value = np.array(value, copy=False) +++ if warn_nannat_platform and np.issubdtype(value.dtype, np.floating) and not np.isfinite(value).all(): +++ warnings.warn(warn_nannat_platform) ++ ++ # have a scalar array-like (e.g. 
NaT) ++ if value.ndim == 0: ++--- a/pandas/tests/dtypes/cast/test_downcast.py +++++ b/pandas/tests/dtypes/cast/test_downcast.py ++@@ -7,6 +7,9 @@ from pandas.core.dtypes.cast import mayb ++ ++ from pandas import DatetimeIndex, Series, Timestamp ++ import pandas._testing as tm +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86',platform.uname()[4])) ++ ++ ++ @pytest.mark.parametrize( ++@@ -77,6 +80,7 @@ def test_downcast_conversion_empty(any_r ++ tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) ++ ++ +++@pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ @pytest.mark.parametrize("klass", [np.datetime64, np.timedelta64]) ++ def test_datetime_likes_nan(klass): ++ dtype = klass.__name__ + "[ns]" ++--- a/pandas/tests/frame/indexing/test_where.py +++++ b/pandas/tests/frame/indexing/test_where.py ++@@ -8,6 +8,9 @@ from pandas.core.dtypes.common import is ++ import pandas as pd ++ from pandas import DataFrame, DatetimeIndex, Series, Timestamp, date_range, isna ++ import pandas._testing as tm +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ ++ class TestDataFrameIndexingWhere: ++@@ -340,6 +343,7 @@ class TestDataFrameIndexingWhere: ++ result = a.where(do_not_replace, b) ++ tm.assert_frame_equal(result, expected) ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False)#not found ++ def test_where_datetime(self): ++ ++ # GH 3311 ++--- a/pandas/tests/frame/test_analytics.py +++++ b/pandas/tests/frame/test_analytics.py ++@@ -23,6 +23,9 @@ from pandas import ( ++ import pandas._testing as tm ++ import pandas.core.algorithms as algorithms ++ import pandas.core.nanops as nanops +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ ++ def assert_stat_op_calc( ++@@ -790,6 +793,7 @@ class TestDataFrameAnalytics: ++ expected = pd.Series(result, index=["A", "B"]) ++ tm.assert_series_equal(result, expected) ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ def test_sum_nanops_timedelta(self): ++ # prod isn't defined on timedeltas ++ idx = ["a", "b", "c"] ++--- a/pandas/tests/indexes/datetimes/test_datetime.py +++++ b/pandas/tests/indexes/datetimes/test_datetime.py ++@@ -7,6 +7,9 @@ import pytest ++ import pandas as pd ++ from pandas import DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets ++ import pandas._testing as tm +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ randn = np.random.randn ++ ++@@ -63,6 +66,7 @@ class TestDatetimeIndex: ++ idx2 = pd.date_range(end="2000", periods=periods, freq="S") ++ assert len(idx2) == periods ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ def test_nat(self): ++ assert DatetimeIndex([np.nan])[0] is pd.NaT ++ ++--- a/pandas/tests/reductions/test_reductions.py +++++ b/pandas/tests/reductions/test_reductions.py ++@@ -23,6 +23,9 @@ from pandas import ( ++ ) ++ import pandas._testing as tm ++ from pandas.core import nanops +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ ++ def get_objs(): ++@@ -1142,6 +1145,7 @@ class TestSeriesMode: ++ expected = Series(expected2, dtype=object) ++ tm.assert_series_equal(result, expected) 
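# (Illustrative aside, not part of the patch: the behaviour these xfails work
#  around can be reproduced directly. On x86 this prints NaT; on architectures
#  where float NaN -> int is undefined it has been seen to come out as
#  1970-01-01 (arm*) or as the maximum timestamp, 2262-04-11 (s390x).)
import numpy as np
import pandas as pd

print(pd.Series([np.nan]).astype("datetime64[ns]")[0])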
++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ @pytest.mark.parametrize( ++ "dropna, expected1, expected2", ++ [ ++--- a/pandas/tests/series/test_constructors.py +++++ b/pandas/tests/series/test_constructors.py ++@@ -28,7 +28,9 @@ from pandas import ( ++ ) ++ import pandas._testing as tm ++ from pandas.core.arrays import IntervalArray, period_array ++- +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ class TestSeriesConstructors: ++ @pytest.mark.parametrize( ++@@ -960,6 +962,7 @@ class TestSeriesConstructors: ++ ++ tm.assert_series_equal(result, expected) ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None]) ++ def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): ++ # GH 17415: With naive string ++@@ -1272,6 +1275,7 @@ class TestSeriesConstructors: ++ series[2] = val ++ assert isna(series[2]) ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ def test_NaT_cast(self): ++ # GH10747 ++ result = Series([np.nan]).astype("M8[ns]") ++--- a/pandas/tests/test_algos.py +++++ b/pandas/tests/test_algos.py ++@@ -35,6 +35,9 @@ from pandas.conftest import BYTES_DTYPES ++ import pandas.core.algorithms as algos ++ from pandas.core.arrays import DatetimeArray ++ import pandas.core.common as com +++import platform +++import re +++is_nannat_working=bool(re.match('i.?86|x86|s390|ppc',platform.uname()[4])) ++ ++ ++ class TestFactorize: ++@@ -1046,6 +1049,7 @@ class TestValueCounts: ++ expected = Series([2, 1, 1], index=[5.0, 10.3, np.nan]) ++ tm.assert_series_equal(result, expected) ++ +++ @pytest.mark.xfail(condition=not is_nannat_working,reason="https://bugs.debian.org/877754",strict=False) ++ def test_value_counts_normalized(self): ++ # GH12558 ++ s = Series([1, 2, np.nan, np.nan, np.nan]) diff --cc debian/python-pandas-doc.doc-base index 00000000,00000000..b29cae02 new file mode 100644 --- /dev/null +++ b/debian/python-pandas-doc.doc-base @@@ -1,0 -1,0 +1,7 @@@ ++Document: python3-pandas ++Title: pandas - powerful Python data analysis toolkit ++Section: Science/Data Analysis ++ ++Format: HTML ++Index: /usr/share/doc/python-pandas-doc/html/index.html ++Files: /usr/share/doc/python-pandas-doc/html/* diff --cc debian/python-pandas-doc.docs index 00000000,00000000..8137b041 new file mode 100644 --- /dev/null +++ b/debian/python-pandas-doc.docs @@@ -1,0 -1,0 +1,1 @@@ ++doc/build/html diff --cc debian/python-pandas-doc.links index 00000000,00000000..4ef46387 new file mode 100644 --- /dev/null +++ b/debian/python-pandas-doc.links @@@ -1,0 -1,0 +1,4 @@@ ++usr/share/javascript/jquery/jquery.js usr/share/doc/python-pandas-doc/html/_static/jquery.js ++usr/share/javascript/underscore/underscore.js usr/share/doc/python-pandas-doc/html/_static/underscore.js ++usr/share/doc/python-pandas-doc/html/whatsnew/index.html.gz usr/share/doc/python-pandas-doc/NEWS.html.gz ++usr/share/doc/python-pandas-doc/html/whatsnew/index.html.gz usr/share/doc/python3-pandas/NEWS.html.gz diff --cc debian/python3-pandas-lib.install index 00000000,00000000..056ad6b6 new file mode 100644 --- /dev/null +++ b/debian/python3-pandas-lib.install @@@ -1,0 -1,0 +1,2 @@@ ++usr/lib/python3/dist-packages/pandas/*/*/*.so ++usr/lib/python3/dist-packages/pandas/*/*.so diff --cc debian/python3-pandas.install 
index 00000000,00000000..eae3930a new file mode 100644 --- /dev/null +++ b/debian/python3-pandas.install @@@ -1,0 -1,0 +1,1 @@@ ++usr/lib/python3/ diff --cc debian/rules index 00000000,00000000..1bf3c437 new file mode 100755 --- /dev/null +++ b/debian/rules @@@ -1,0 -1,0 +1,156 @@@ ++#!/usr/bin/make -f ++# -*- mode: makefile; coding: utf-8 -*- ++ ++export DEB_BUILD_MAINT_OPTIONS = hardening=+all ++ ++# Pass hardening flags into distutils, explicitly ++export CFLAGS = $(shell dpkg-buildflags --get CFLAGS) ++export CPPFLAGS = $(shell dpkg-buildflags --get CPPFLAGS) ++export CXXFLAGS = $(shell dpkg-buildflags --get CXXFLAGS) ++ ++PACKAGE3_NAME = python3-pandas ++PACKAGE3_ROOT_DIR = debian/${PACKAGE3_NAME} ++ ++PY3VERS = $(shell py3versions -vr) ++PY3VER = $(shell py3versions -vd) ++SOURCE_DATE:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%a, %d %b %Y" || echo "xxx, xx xxx xxxx") ++SOURCE_TIME:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%T" || echo "xx:xx:xx") ++ ++UVER := $(shell LC_ALL=C dpkg-parsechangelog | awk '/^Version:/{print $$2;}' | sed -e 's,-[^-]*$$,,g' | sed -e 's,+dfsg,,g') ++# Python doesn't use ~ for rc ++UVER_PY := $(shell echo $(UVER) | sed -e 's,[~],,g') ++UVER_PYSHORT := $(shell echo $(UVER_PY) | sed -e 's,+git.*,,g') ++ ++MIN_CYTHONVER = 0.23 ++ ++# Filter out tests with "marker expressions" and "keyword expressions". Ref: pytest(1) ++ifeq ($(DEB_HOST_ARCH),$(filter $(DEB_HOST_ARCH), amd64 i386 kfreebsd-amd64 kfreebsd-i386 x32)) ++ PYTEST_MARKER_ARCH := ++else ++ PYTEST_MARKER_ARCH := and not slow ++endif ++PYTEST_MARKER := not network $(PYTEST_MARKER_ARCH) ++ ++# try to prevent unsunctioned downloads ++export http_proxy=http://127.0.0.1:9/ ++export https_proxy=http://127.0.0.1:9/ ++ ++export SHELL=/bin/bash ++ ++# Mega rule ++%: ++ : # Explicit build system to avoid use of all-in-1 Makefile ++ dh $@ --buildsystem=pybuild --with python3 ++ ++clean_generated: ++ find pandas/ -regex '.*\.c\(\|pp\)' | xargs grep -l -e 'Generated by Cython' | xargs -r rm -f ++ ++_cythonize%: ++ debian/rules clean_generated # force removal of previous copies ++ python$(*:2=) setup.py cython ++ D=debian/cythonized-files$(*:2=) && \ ++ git rm -rf $$D; \ ++ find pandas/ -regex '.*\.c\(\|pp\)' | while read f; do \ ++ grep -q 'Generated by Cython' "$$f" || continue; \ ++ mkdir -p "$$D/$$(dirname $$f)"; \ ++ cp "$$f" "$$D/$$(dirname $$f)"; \ ++ git add -f "$$D/$$f"; \ ++ done; \ ++ echo "$(UVER)" >| $$D/VERSION; git add $$D/VERSION ++ ++_uncythonize%: ++ echo "$*" | grep -q '^3' && PY=3 || PY= ; \ ++ CYTHONVER=$$(dpkg -l cython$$PY 2>/dev/null | awk '/^ii/{print $$3;}' || echo 0); \ ++ dpkg --compare-versions "$$CYTHONVER" lt "$(MIN_CYTHONVER)" && { \ ++ echo "I: Using pre-Cython-ed files for Python $*"; \ ++ cd debian/cythonized-files$$PY/ ; \ ++ find . -regex '.*\.c\(\|pp\)' | while read f; do cp $$f ../../$$f; done; } || : ++ ++cythonize: _cythonize3 ++ ++override_dh_clean: clean_generated ++ : # Make sure that cythonized sources are up-to-date ++ [ ! 
-e debian/cythonized-files3/VERSION ] || [ "$(UVER)" = "`cat debian/cythonized-files3/VERSION`" ] ++ rm -rf build doc/_build *-stamp # pandas.egg-info pandas/datasets/__config__.py ++ dh_clean ++ ++version_py: ++ [ -e pandas/__version.py ] || \ ++ echo -e "version = '$(UVER_PY)'\nshort_version = '$(UVER_PYSHORT)'" > pandas/__version.py ++ ++override_dh_auto_build: version_py ++ # Override default build operation which --force's re-cythonization ++ # on elderly ubuntus ++ # Just build the version.py file ++ : ++ ++ ++override_dh_auto_install: ${PY3VERS:%=python-install%} ${PY3VERS:%=python-test%} ++# Per Python version logic -- install, test, remomove .so (installed into -lib) ++python-install%: _uncythonize% ++ python$* setup.py install --install-layout=deb --root=$(CURDIR)/debian/tmp ++ ++python-test%: python-install% ++ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) ++ echo "backend : Agg" >| $(CURDIR)/build/matplotlibrc ++ : # Run unittests here against installed pandas, in sections to avoid out-of-memory crash (#943732); exit code 5 means no tests in this file ++ echo "$*" | grep -q '^3' && PY=3 || PY=$*; \ ++ export PYTHONPATH=`/bin/ls -d $$PWD/debian/tmp/usr/lib/python$$PY/*/`; \ ++ export MPLCONFIGDIR=$(CURDIR)/build HOME=$(CURDIR)/build; \ ++ python$* pandas/util/_print_versions.py; \ ++ cd build/; TEST_SUCCESS=true; for TEST_SUBSET in $${PYTHONPATH}/pandas/tests/* ; do \ ++ LOCALE_OVERRIDE=C xvfb-run -a -s "-screen 0 1280x1024x24 -noreset" \ ++ python$* -m pytest -s -v -m "$(PYTEST_MARKER)" --confcutdir=$${PYTHONPATH}/pandas --deb-data-root-dir=$(CURDIR)/pandas/tests --strict-data-files $${TEST_SUBSET} || test $$? == 5 || TEST_SUCCESS=false \ ++ ; done ; $${TEST_SUCCESS} ++else ++ : # Skip unittests due to nocheck ++endif ++ ++override_dh_installdocs: ++ : # Build Documentation using installed pandas ++ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS))) ++ifneq (,$(findstring -a,$(DH_INTERNAL_OPTIONS))) ++ : # not building documentation in -a ++else ++ cd doc && PYTHONPATH=$(CURDIR)/$(PACKAGE3_ROOT_DIR)-lib/usr/lib/python3/dist-packages:$(CURDIR)/$(PACKAGE3_ROOT_DIR)/usr/lib/python3/dist-packages MPLCONFIGDIR=$(CURDIR)/build HOME=$(CURDIR)/build LC_ALL=C python3 make.py html ++ # strip build paths and statsmodels timestamps for reproducibility, and online Javascript for privacy ++ for html in `find doc/build/html -name _modules -prune -o -name "*.html" -o -name "*.ipynb"` ; do \ ++ sed -i -e 's#$(CURDIR)/.pybuild/[^/]*/build/pandas/#/usr/lib/python3/dist-packages/pandas/#g' \ ++ -e 's#$(CURDIR)/debian/python3-pandas/usr/lib/python3/dist-packages/pandas/#/usr/lib/python3/dist-packages/pandas/#g' \ ++ -e 's#