Import pandas_2.2.3+dfsg-7.debian.tar.xz
author    Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 28 Jan 2025 22:18:06 +0000
committer Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 28 Jan 2025 22:18:06 +0000
[dgit import tarball pandas 2.2.3+dfsg-7 pandas_2.2.3+dfsg-7.debian.tar.xz]

62 files changed:
README.source [new file with mode: 0644]
changelog [new file with mode: 0644]
contributors_list.txt [new file with mode: 0644]
control [new file with mode: 0644]
copyright [new file with mode: 0644]
gbp.conf [new file with mode: 0644]
patches/1029251_ignore_rounding_error.patch [new file with mode: 0644]
patches/1068104_time64.patch [new file with mode: 0644]
patches/1068422_ignore_dask_tests.patch [new file with mode: 0644]
patches/1088988_xarray_pyreadstat_compat.patch [new file with mode: 0644]
patches/2p1_openpyxl_errors.patch [new file with mode: 0644]
patches/accept_system_tzdata.patch [new file with mode: 0644]
patches/add_missing_importorskip.patch [new file with mode: 0644]
patches/allow_no_matplotlib.patch [new file with mode: 0644]
patches/allow_no_openpyxl.patch [new file with mode: 0644]
patches/armel_ignore_nonwarning.patch [new file with mode: 0644]
patches/blosc_nonstrict_xfail.patch [new file with mode: 0644]
patches/contributor_list_not_in_tarball.patch [new file with mode: 0644]
patches/deb_disable_analytics.patch [new file with mode: 0644]
patches/deb_doc_donotoverride_PYTHONPATH.patch [new file with mode: 0644]
patches/deb_nonversioneer_version.patch [new file with mode: 0644]
patches/find_test_data.patch [new file with mode: 0644]
patches/fix_overly_arch_specific_xfails.patch [new file with mode: 0644]
patches/fix_random_seeds.patch [new file with mode: 0644]
patches/hurd_compat.patch [new file with mode: 0644]
patches/ignore_ipython_exceptions.patch [new file with mode: 0644]
patches/ignore_python3p12_deprecations.patch [new file with mode: 0644]
patches/ignore_test_1094417.patch [new file with mode: 0644]
patches/mathjax-path.patch [new file with mode: 0644]
patches/mips_pow_nan.patch [new file with mode: 0644]
patches/no_pkg_resources.patch [new file with mode: 0644]
patches/no_pytz_datetime.patch [new file with mode: 0644]
patches/numba_fail_32bit.patch [new file with mode: 0644]
patches/numba_warn_nonx86.patch [new file with mode: 0644]
patches/privacy.patch [new file with mode: 0644]
patches/privacy2.patch [new file with mode: 0644]
patches/pytables_python3p12.patch [new file with mode: 0644]
patches/remove_ccbysa_snippets.patch [new file with mode: 0644]
patches/series [new file with mode: 0644]
patches/skip_test_missing_required_dependency.patch [new file with mode: 0644]
patches/sum_loosen_test_tolerance.patch [new file with mode: 0644]
patches/tests_dont_assume_64bit.patch [new file with mode: 0644]
patches/tests_dont_assume_endian.patch [new file with mode: 0644]
patches/unbreak_clean.patch [new file with mode: 0644]
patches/use_system_intersphinx.patch [new file with mode: 0644]
patches/value_counts_nat_numpy2.patch [new file with mode: 0644]
patches/versioned_importorskip.patch [new file with mode: 0644]
patches/xarray_version_workaround.patch [new file with mode: 0644]
patches/xfail_tests_nonintel_io.patch [new file with mode: 0644]
python-pandas-doc.doc-base [new file with mode: 0644]
python-pandas-doc.docs [new file with mode: 0644]
python-pandas-doc.links [new file with mode: 0644]
rules [new file with mode: 0755]
salsa-ci.yml [new file with mode: 0644]
source/format [new file with mode: 0644]
source/lintian-overrides [new file with mode: 0644]
source/options [new file with mode: 0644]
tests/control [new file with mode: 0644]
tests/ignoredtests [new file with mode: 0755]
tests/unittests3 [new file with mode: 0755]
upstream/metadata [new file with mode: 0644]
watch [new file with mode: 0644]
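
The patches listed above are applied in the order given by patches/series.
A minimal sketch of unpacking the source package and inspecting the patch
stack (the .dsc filename follows the usual naming for this version, and the
commands assume the standard 3.0 (quilt) source format declared in
source/format):

    export QUILT_PATCHES=debian/patches
    dpkg-source -x pandas_2.2.3+dfsg-7.dsc   # unpack; applies the patch series
    cd pandas-2.2.3+dfsg
    quilt series    # list patches in application order
    quilt applied   # show which patches are currently applied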

diff --git a/README.source b/README.source
new file mode 100644
index 0000000..c60acd4
--- /dev/null
+++ b/README.source
@@ -0,0 +1,18 @@
+New upstream and contributors_list update (warning: assumes my directory layout):
+
+#update this: export version=1.5.1
+cd ~/Debian/sourcepkgs/pandas
+GBP_CONF_FILES=~/.gbp.conf gbp import-orig --upstream-branch=upstream  --debian-branch=main --uscan --pristine-tar
+echo "Current to version ${version} (generated by git shortlog -ns)" > debian/contributors_list.txt
+echo "There may be multiple entries for the same person if they have used more than one (form of their) name" >> debian/contributors_list.txt
+echo "https://github.com/pandas-dev/pandas/graphs/contributors" >> debian/contributors_list.txt
+echo "" >> debian/contributors_list.txt
+cd ~/Debian/upstreams/pandas
+git pull
+git checkout v${version}
+git shortlog -ns >> ~/Debian/sourcepkgs/pandas/debian/contributors_list.txt
+git checkout main
+cd ~/Debian/sourcepkgs/pandas
+echo "" >> debian/contributors_list.txt
+echo "Debian packaging" >> debian/contributors_list.txt
+git shortlog -ns -- debian >> debian/contributors_list.txt
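
The contributors_list part of the recipe above can be wrapped into a single
script. This is only a sketch under the same assumptions (the personal
~/Debian directory layout); the script name is illustrative, reading the
release tag directly with git shortlog stands in for the checkout and
checkout-back steps so the upstream work tree is left untouched, and the
gbp import-orig step is omitted:

    #!/bin/sh
    # Sketch only: regenerate debian/contributors_list.txt for one upstream
    # version.  Usage: ./update-contributors-list 2.2.3
    set -e
    version="$1"
    pkg=~/Debian/sourcepkgs/pandas
    up=~/Debian/upstreams/pandas
    {
      echo "Current to version ${version} (generated by git shortlog -ns)"
      echo "There may be multiple entries for the same person if they have used more than one (form of their) name"
      echo "https://github.com/pandas-dev/pandas/graphs/contributors"
      echo ""
    } > "$pkg"/debian/contributors_list.txt
    git -C "$up" fetch --tags    # make sure the release tag exists locally
    # Read the tag directly instead of checking it out.
    git -C "$up" shortlog -ns "v${version}" >> "$pkg"/debian/contributors_list.txt
    {
      echo ""
      echo "Debian packaging"
      git -C "$pkg" shortlog -ns -- debian
    } >> "$pkg"/debian/contributors_list.txt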
diff --git a/changelog b/changelog
new file mode 100644
index 0000000..d592a0a
--- /dev/null
+++ b/changelog
@@ -0,0 +1,1825 @@
+pandas (2.2.3+dfsg-7) unstable; urgency=medium
+
+  * Re-add some but not all build-deps; stop ignoring most test
+    and documentation failures (but still ignore #1094417).
+  * Temporarily move matplotlib to Build-Depends-Indep.
+  * Tests: remove no longer needed xfails.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 28 Jan 2025 22:18:06 +0000
+
+pandas (2.2.3+dfsg-6) unstable; urgency=medium
+
+  * Tests: accept recent improvements in xarray and pyreadstat,
+    avoid crash when creating reference.
+  * Docs privacy: use local file for logo.
+  * Temporarily ignore test and documentation failures and drop some
+    build-deps to unblock the numpy2 transition
+    (workaround for #1088988, #1094385).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 28 Jan 2025 07:22:40 +0000
+
+pandas (2.2.3+dfsg-5) unstable; urgency=medium
+
+  * Docs: switch back to pydata-sphinx-theme.
+  * Docs: (re-)add build-dependencies we now can satisfy.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 21 Oct 2024 19:43:11 +0100
+
+pandas (2.2.3+dfsg-4) unstable; urgency=medium
+
+  * Re-enable docs using a different theme (workaround for #1084781).
+  * Re-add dropped build-depends.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 11 Oct 2024 10:18:43 +0100
+
+pandas (2.2.3+dfsg-3) unstable; urgency=medium
+
+  * Ignore docs build failure (workaround for #1084781) and print the log.
+  * Docs: remove old pydata-sphinx-theme workarounds.
+  * Temporarily drop some build-depends to break bootstrap cycle.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 08 Oct 2024 22:14:45 +0100
+
+pandas (2.2.3+dfsg-2) unstable; urgency=medium
+
+  * Move pyreadstat to Build-Depends-Indep to break cycle.
+  * Allow building on ports without full test-Depends.
+  * Re-enable sqlalchemy tests.
+  * Stop using pkg_resources. (Closes: #1083523)
+  * Tests: stop passing non-constant pytz.timezone
+    directly to datetime.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 06 Oct 2024 16:40:59 +0100
+
+pandas (2.2.3+dfsg-1) unstable; urgency=medium
+
+  * New upstream release. (Closes: #1082096)
+  * Update contributors, refresh patches.
+  * Tests: keep 32-bit intervaltree xfailed.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 21 Sep 2024 17:21:40 +0100
+
+pandas (2.2.2+dfsg-4) unstable; urgency=medium
+
+  * Tests: re-enable bottleneck and tabulate (see #1070359, #1070360),
+    make blosc xfail nonstrict, use pyproject.toml in autopkgtest,
+    run autopkgtest in CI, be less verbose to fit in the CI log.
+  * Add transition Breaks.
+  * Upload to unstable. (Closes: #1069792)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 07 Jul 2024 19:36:37 +0100
+
+pandas (2.2.2+dfsg-3) experimental; urgency=medium
+
+  * Tests: add forgotten import.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 06 May 2024 14:47:52 +0100
+
+pandas (2.2.2+dfsg-2) experimental; urgency=medium
+
+  * Allow importing pandas._testing without pytest.
+  * Tests: don't require 32-bit to imply time32.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 06 May 2024 11:29:54 +0100
+
+pandas (2.2.2+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.  Update copyright, patches, depends.
+  * Upload to experimental, due to potential breakage (see #1069792).
+  * Tests: use our test data path, skip too-old dependencies,
+    mark some tests as requiring optional dependencies,
+    remove no longer needed patches, clean up afterwards.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 05 May 2024 14:40:45 +0100
+
+pandas (2.1.4+dfsg-8) unstable; urgency=medium
+
+  * Re-enable the documentation.
+  * Bump Standards-Version to 4.7.0 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 21 Apr 2024 13:50:13 +0100
+
+pandas (2.1.4+dfsg-7) unstable; urgency=medium
+
+  * Tests: don't require 32-bit to imply time32. (Closes: #1068104)
+  * Temporarily disable the documentation (workaround for #1068349).
+  * Tests: temporarily ignore dask tests (workaround for #1068422).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 05 Apr 2024 22:44:46 +0100
+
+pandas (2.1.4+dfsg-6) unstable; urgency=medium
+
+  * Tests: try again to stop testing __pycache__. (Closes: #1063959)
+  * Tests: avoid FutureWarning from newer xarray. (Closes: #1066801)
+  * Drop pytables test-depends (skipping tests) on architectures
+    where it is not available. (Closes: #1064384)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 16 Mar 2024 17:11:33 +0000
+
+pandas (2.1.4+dfsg-5) unstable; urgency=medium
+
+  * Tests: stop trying to test __pycache__. (Closes: #1063959)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 18 Feb 2024 20:31:18 +0000
+
+pandas (2.1.4+dfsg-4) unstable; urgency=medium
+
+  * Tests: shorten ignoredtests to avoid timeout.
+  * Temporarily skip numba tests (workaround for #1033907).
+  * Update transition Breaks.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 09 Feb 2024 20:48:14 +0000
+
+pandas (2.1.4+dfsg-3) unstable; urgency=medium
+
+  * Add more transition Breaks (see #1043240).
+  * Upload to unstable. (Closes: #1056828)
+  * Tests: don't fail when (random) sum test input sums to near-0,
+    use our paths, depend on pytest-localserver, skip broken test.
+  * Docs: re-enable style.ipynb.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 01 Feb 2024 07:52:49 +0000
+
+pandas (2.1.4+dfsg-2) experimental; urgency=medium
+
+  * Fix autopkgtest syntax error.
+  * Add more transition Breaks.
+  * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*.
+  * Tests: be compatible with current blosc2. (Closes: #1061043)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 21 Jan 2024 18:44:54 +0000
+
+pandas (2.1.4+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.  Update contributors_list and patches.
+  * Add transition Breaks.
+  * Temporarily disable style.ipynb (workaround for #1057309).
+  * Tests: ignore some DeprecationWarnings and pytables exceptions
+    to allow building in Python 3.12 (see #1055801).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 08 Dec 2023 22:06:00 +0000
+
+pandas (2.1.3+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.  Update contributors_list and patches.
+  * Re-enable numexpr for **, as upstream have decided to accept
+    its different overflow behaviour.
+  * Tests: fix autopkgtest syntax errors.
+  * Allow building on ports without pyreadstat or matplotlib.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 12 Nov 2023 21:53:27 +0000
+
+pandas (2.1.1+dfsg-2) experimental; urgency=medium
+
+  * Tests: don't fail when xlsxwriter is not installed.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 13 Oct 2023 19:43:13 +0100
+
+pandas (2.1.1+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.  Update contributors_list and patches.
+  * Don't test-depend on tabulate, because our version is too old.
+  * Extend find_test_data to cover a new fixture.
+  * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 11 Oct 2023 07:40:12 +0100
+
+pandas (2.1.0+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.  Update copyright, patches, dependencies.
+  * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*,
+    ignore warnings on Itanium,
+    run with fixed locale set, skip not fail without matplotlib.
+  * Docs: fix bug in previous reproducibility patch.
+  * d/watch: Use git, as the tarball now excludes docs and test data.
+  * Don't crash in setup.py clean.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 03 Sep 2023 22:06:46 +0100
+
+pandas (2.0.3+dfsg-6) experimental; urgency=medium
+
+  * Tests: try again to ignore NaN warnings on mips*.
+  * Build on ports without sqlalchemy or working matplotlib.
+  * Reproducibility: strip object addresses,
+    disable some timestamps and random IDs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 27 Aug 2023 11:17:44 +0100
+
+pandas (2.0.3+dfsg-5) experimental; urgency=medium
+
+  * Fix SAS I/O crash on armhf.
+  * Clean up after documentation build.
+  * Tests: ignore NaN warnings and 1**NaN, NaN**0 NaNs on mips*.
+  * Docs: use fixed seeds for reproducibility.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 25 Aug 2023 21:26:09 +0100
+
+pandas (2.0.3+dfsg-4) experimental; urgency=medium
+
+  * Ignore numba errors on most non-x86 systems (workaround
+    for #1033907).  This already warns the user.
+  * Run but ignore some previously skipped tests.
+  * Tests: revert "don't use : in numexpr variable names"
+    as that wasn't actually the problem.
+  * Tests: ignore another non-warning on armel.
+  * Fix spelling.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 20 Aug 2023 09:26:03 +0100
+
+pandas (2.0.3+dfsg-3) experimental; urgency=medium
+
+  * Don't use numexpr for **, as it has different overflow behaviour.
+  * Tests: don't use : in numexpr variable names.
+  * Fix missing import on arm*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 17 Aug 2023 19:51:45 +0100
+
+pandas (2.0.3+dfsg-2) experimental; urgency=medium
+
+  * Clean up after tests.
+  * Don't build in bare C locale.
+  * Docs: make more plots reproducible.
+  * Remove the warning that results may be wrong on mips*,
+    as this appears to have been fixed.
+  * Tests: don't fail on warnings we added.
+  * Remove unnecessary HDF/Stata I/O warnings.
+  * Depend on system tzdata not PyPI tzdata (see #1043968).
+    Use tzdata-legacy where old-style timezone names are used.
+  * Tests: re-enable dask tests (see #1043093).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 13 Aug 2023 21:55:15 +0100
+
+pandas (2.0.3+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.  Drop/refresh patches, update dependencies.
+  * Upload to experimental, due to potential breakage.
+  * Add dask Breaks and disable tests (see #1043093).
+  * Tests: re-enable numba tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 06 Aug 2023 11:02:39 +0100
+
+pandas (1.5.3+dfsg-4) unstable; urgency=medium
+
+  * Docs: allow building with Sphinx 7.  (Closes: #1042672)
+  * Remove unused python3-six dependencies.  (Closes: #1039441)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 30 Jul 2023 22:34:06 +0100
+
+pandas (1.5.3+dfsg-3) unstable; urgency=medium
+
+  * Tests: don't fail with fsspec 2023.  (Closes: #1042043)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 26 Jul 2023 07:57:11 +0100
+
+pandas (1.5.3+dfsg-2) unstable; urgency=medium
+
+  * Tests: use a non-backzone timezone.  (Closes: #1031437)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 19 Feb 2023 11:01:48 +0000
+
+pandas (1.5.3+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Update copyright and patches.
+  * Tests: ignore plot rounding errors.  (Closes: #1029251)
+  * Tests: re-enable dask test, ignore numpy 1.24 warning.
+  * Docs: re-enable style.ipynb.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 22 Jan 2023 11:54:30 +0000
+
+pandas (1.5.2+dfsg-6) unstable; urgency=medium
+
+  * Move xarray to Build-Depends-Indep to break circular dependency.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 11 Jan 2023 07:34:28 +0000
+
+pandas (1.5.2+dfsg-5) unstable; urgency=medium
+
+  * Fix or ignore warnings from numpy 1.24.
+  * Stop ignoring tests on mips*, thanks to Adrian Bunk.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 10 Jan 2023 20:48:34 +0000
+
+pandas (1.5.2+dfsg-4) unstable; urgency=medium
+
+  * Add dask transition Breaks (see #1025393).
+  * Don't try to load intersphinx links from python-numpy-doc,
+    as it no longer exists.
+  * Upload to unstable.  (Closes: #1023965, #1022571)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 09 Jan 2023 19:45:45 +0000
+
+pandas (1.5.2+dfsg-3) experimental; urgency=medium
+
+  * Tests: ignore a numpy warning.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 07 Jan 2023 10:09:11 +0000
+
+pandas (1.5.2+dfsg-2) experimental; urgency=medium
+
+  * Fix date_range overflow in 32-bit.  (Closes: #1026351)
+  * Don't try to load intersphinx links from python-matplotlib-doc,
+    as it no longer exists.  (Closes: #1027576)
+  * Re-enable parso-using tests (as #1023896 has been fixed).
+  * Bump Standards-Version to 4.6.2 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 06 Jan 2023 21:36:03 +0000
+
+pandas (1.5.2+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.  Update contributors_list.
+  * Fix Lintian typo.
+  * Xfail parso-using tests (workaround for #1023896).
+  * Temporarily drop numba test-depends (skips tests),
+    as it is uninstallable due to #1024795.
+  * Add transition Breaks (see #1022571).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 30 Nov 2022 21:48:47 +0000
+
+pandas (1.3.5+dfsg-6) unstable; urgency=medium
+
+  * Team upload
+  * Ignore test failures for first build with Python 3.11,
+    see #1023965
+
+ -- Graham Inggs <ginggs@debian.org>  Sun, 13 Nov 2022 10:36:51 +0000
+
+pandas (1.5.1+dfsg-3) experimental; urgency=medium
+
+  * Revert minimum Cython version.
+  * Tests: fix another little-endian assumption.
+  * Silence some Lintian messages.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 10 Nov 2022 21:36:53 +0000
+
+pandas (1.5.1+dfsg-2) experimental; urgency=medium
+
+  * Tests: xfail rolling.var/std rounding error on i386,
+    don't assume little-endian,
+    re-disable some SQL tests our setup blocks.
+  * Temporarily lower minimum Cython version (see LP#1995992).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 09 Nov 2022 21:17:44 +0000
+
+pandas (1.5.1+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.  Update copyright and patches.
+  * Include more of the optional test-depends, for more test coverage.
+  * Update minimum dependency versions.
+  * Docs: update Sphinx extension dependencies,
+    use pydata theme but disable analytics (for privacy) and
+    features requiring dependencies we don't have.
+  * Tests: use the upstream mechanism to skip non-CI-friendly
+    (e.g. excessively resource-heavy) tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 07 Nov 2022 21:02:49 +0000
+
+pandas (1.4.3+dfsg-6) experimental; urgency=medium
+
+  * Fix NaT bug introduced by previous patch.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 24 Aug 2022 22:01:26 +0100
+
+pandas (1.4.3+dfsg-5) experimental; urgency=medium
+
+  * Fix bounds checking in float-to-datetime conversion.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 23 Aug 2022 22:24:15 +0100
+
+pandas (1.4.3+dfsg-4) experimental; urgency=medium
+
+  * Tests: skip another s390x numba issue, new riscv64 date error test.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 22 Aug 2022 22:25:08 +0100
+
+pandas (1.4.3+dfsg-3) experimental; urgency=medium
+
+  * Don't warn on non-x86 NaN->datetime cast,
+    as this has been fixed in numpy (#877754).
+  * Tests: fix architecture detection, skip another s390x numba crash,
+    extend hurd_compat.patch, more output around riscv64 date errors.
+  * Add transition Breaks (see #1017809).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 21 Aug 2022 18:13:03 +0100
+
+pandas (1.4.3+dfsg-2) experimental; urgency=medium
+
+  * Tests: don't assume little-endian,
+    don't try to cast pytz version to float,
+    ignore a missing numpy warning on armel.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 20 Aug 2022 10:32:58 +0100
+
+pandas (1.4.3+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.  Update copyright and patches.
+  * Upload to experimental, due to potential breakage.
+  * Remove xlwt Recommends.
+  * Update build/test-Depends.
+  * Re-enable numba (see #1000336).
+  * Update README.source.
+  * Update Lintian override format (see #1007002).
+  * Tests: add a searchable failure message to crashes/errors,
+    be less verbose so the log fits in the Salsa CI size limit,
+    remove a now-useless xlrd test, xfail some tests on 32-bit.
+  * Bump Standards-Version to 4.6.1 (no changes needed).
+  * Show the numba non-x86 warning on more (all?) numba uses.
+  * Drop stable_test_urls.patch.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 27 Jul 2022 22:17:33 +0100
+
+pandas (1.3.5+dfsg-5) unstable; urgency=medium
+
+  * Fix FTBFS with Sphinx 5.  (Closes: #1013375)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 26 Jun 2022 15:06:07 +0100
+
+pandas (1.3.5+dfsg-4) unstable; urgency=medium
+
+  * Temporarily skip numba tests.  (Closes: #1008179)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 25 Mar 2022 20:57:26 +0000
+
+pandas (1.3.5+dfsg-3) unstable; urgency=medium
+
+  * Tests: be compatible with new fsspec.  (Closes: #1006170)
+  * Re-enable numba tests.
+  * Fix pyversions call.
+  * Enable Salsa CI.
+  * Update Lintian overrides.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 21 Feb 2022 07:35:51 +0000
+
+pandas (1.3.5+dfsg-2) unstable; urgency=medium
+
+  * Temporarily skip numba tests (see LP#1951814).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 28 Jan 2022 19:22:53 +0000
+
+pandas (1.3.5+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Update contributors_list.
+  * Refresh patches, drop patches no longer needed.
+  * d/watch: Ignore RC versions.
+  * Docs: Add missing mathjax dependency.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 11 Jan 2022 21:25:17 +0000
+
+pandas (1.3.4+dfsg-7) unstable; urgency=medium
+
+  * Team upload.
+  * Disable more numba tests on 32bit archs
+
+ -- Jochen Sprickerhof <jspricke@debian.org>  Thu, 02 Dec 2021 17:32:54 +0100
+
+pandas (1.3.4+dfsg-6) unstable; urgency=medium
+
+  * Team upload.
+  * Fix another import in the test patch
+  * Add closes bug to old changelog
+
+ -- Jochen Sprickerhof <jspricke@debian.org>  Wed, 01 Dec 2021 10:36:56 +0100
+
+pandas (1.3.4+dfsg-5) unstable; urgency=medium
+
+  * Team upload.
+  * Fix missing import
+  * Remove unused salsa-ci.yml
+
+ -- Jochen Sprickerhof <jspricke@debian.org>  Tue, 30 Nov 2021 23:39:49 +0100
+
+pandas (1.3.4+dfsg-4) unstable; urgency=medium
+
+  * Tests: remove another 64 bit assumption.
+  * Warn the user and ignore all tests on mips*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 30 Nov 2021 10:11:08 +0000
+
+pandas (1.3.4+dfsg-3) unstable; urgency=medium
+
+  * Tests: remove some more 64 bit or x86-or-arm64 assumptions.
+  * Docs: add missing MathJax.js symlink, remove unused URL replacement.
+  * Add transition Breaks (see #999415).
+  * Upload to unstable.  (Closes: #999415)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 29 Nov 2021 21:59:11 +0000
+
+pandas (1.3.4+dfsg-2) experimental; urgency=medium
+
+  * Stop ignoring build-time tests.
+  * Tests: don't assume 64 bit or x86-or-arm64.
+  * Fix #877754 warning.
+  * Xfail more numba tests on big-endian systems.
+  * Skip test_statsmodels during build to break dependency cycle.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 25 Nov 2021 22:04:47 +0000
+
+pandas (1.3.4+dfsg-1) experimental; urgency=medium
+
+  [ Andreas Tille ]
+  * Fix watchfile to detect new versions on github
+  * New upstream version (Closes: #1000422)
+  * Standards-Version: 4.6.0 (routine-update)
+  * Respect DEB_BUILD_OPTIONS in override_dh_auto_test target (routine-
+    update)
+  * Remove trailing whitespace in debian/copyright (routine-update)
+  * Add salsa-ci file (routine-update)
+  * Set upstream metadata fields: Bug-Submit, Repository, Repository-Browse,
+    Security-Contact.
+  * Remove obsolete field Name from debian/upstream/metadata (already present in
+    machine-readable debian/copyright).
+  * Apply multi-arch hints.
+    + python-pandas-doc: Add Multi-Arch: foreign.
+  * Remove hidden files and directories that are confusing gbp
+  * drop tag definitions from debian/gbp.conf
+
+  [ Rebecca N. Palmer ]
+  * Upload to experimental (see #999415).
+  * Update contributors_list and d/copyright.
+  * Refresh patches, remove no longer needed patches.
+  * Update minimum dependency versions.
+  * Re-enable xlrd and numba tests (#976620 and #972246 have been fixed).
+  * Temporarily disable failing style.ipynb build.
+  * Tests: accept changed error messages, add new dependency,
+    skip/xfail tests that our test setup breaks, clean up afterwards.
+  * Temporarily ignore build-time tests to get a first build.
+  * Be compatible with matplotlib 3.5.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 21 Nov 2021 21:04:26 +0000
+
+pandas (1.1.5+dfsg-2) unstable; urgency=medium
+
+  * Remove dead URL from tests/examples.  (Closes: #979621)
+  * Mark autopkgtest needs-internet.
+  * Revert "Print uname etc during build".
+  * Mark matplotlib nocheck/nodoc and allow building on ports without
+    matplotlib or numexpr.  (Closes: #977470)
+  * Add python3-tabulate build/test-depends.
+  * Add bottleneck and numexpr test-depends.
+  * Tests: don't require warnings that jedi no longer produces.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 12 Jan 2021 21:06:04 +0000
+
+pandas (1.1.5+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Refresh patches, contributors_list.
+  * Default to openpyxl not xlrd in read_excel, and skip xlrd tests,
+    as xlrd fails if defusedxml is installed (#976620).
+  * Skip numba tests, and not other rolling.apply tests, on s390x.
+    (LP: #1901860)
+  * Tests: on 32 bit systems, avoid time input that overflows.
+  * Print uname etc during build (test for #973854).
+  * Bump Standards-Version to 4.5.1 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 07 Dec 2020 23:06:28 +0000
+
+pandas (1.1.4+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Refresh patches, contributors_list.
+  * Remove numba test-depends (skips tests).  (Closes: #973589)
+  * Loosen pandas-lib->pandas Depends versioning.  (Closes: #973289)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 04 Nov 2020 18:36:11 +0000
+
+pandas (1.1.3+dfsg-2) unstable; urgency=medium
+
+  * Tests: re-xfail an intermittent instance of #877419.
+  * Remove no longer needed test_statsmodels xfail.
+  * Fix invalid test skips.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 22 Oct 2020 07:14:29 +0100
+
+pandas (1.1.3+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Refresh patches, contributors_list.
+  * Remove outdated README.source,
+    add contributors_list update process.
+  * Fix invalid test xfail.
+  * Re-add mistakenly removed non-x86 test xfails.
+  * Declare transition Breaks (see #969650).
+  * Skip another test if multiprocessing is not available.
+  * Update cython3 Depends.
+  * Fix pytables expression bug with Python 3.9.  (Closes: #972015)
+  * Allow test_statsmodels to fail on 3.9 to break bootstrap cycle.
+  * Upload to unstable.  (Closes: #969650)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 18 Oct 2020 16:22:36 +0100
+
+pandas (1.1.1+dfsg-3) experimental; urgency=medium
+
+  * Remove no longer needed test xfails.
+  * Xfail some more non-x86 numba tests and a new instance of #877419.
+  * Skip test_register_entrypoint during build.
+  * Tests: don't assume little-endian.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 03 Sep 2020 08:01:24 +0100
+
+pandas (1.1.1+dfsg-2) experimental; urgency=medium
+
+  * Unconditionally build-depend on sphinx-common (for dh_sphinxdoc).
+  * Re-enable but ignore another potentially crashing non-x86 test.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 01 Sep 2020 08:17:32 +0100
+
+pandas (1.1.1+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.
+  * Upload to experimental.
+  * Drop/refresh patches.  Update d/copyright and contributors_list.
+  * Re-enable asyncio tests.
+  * Skip fsspec tests while it is too old a version.
+  * Fix plot test cleanup (upstream bug 35080).
+  * Skip test that is expected to fail in our setup.
+  * Update minimum dependency versions.
+  * Use dh_sphinxdoc.
+  * Re-enable but ignore potentially crashing non-x86 tests.
+  * Simplify d/rules, mostly by using pybuild more.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 31 Aug 2020 18:44:39 +0100
+
+pandas (1.0.5+dfsg-3) unstable; urgency=medium
+
+  * Remove pytest-asyncio test-depends.
+  * Remove numba test-depends on non-x86: at least s390x crashes.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 26 Aug 2020 22:34:50 +0100
+
+pandas (1.0.5+dfsg-2) unstable; urgency=medium
+
+  * Fix missing import and update numba submodule name in patches.
+  * Disable asyncio tests (workaround for #969050).
+  * Warn that numba may give wrong answers on non-x86,
+    and remove test-depends on mipsel.
+  * Skip a crashing test on s390x.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 26 Aug 2020 20:19:16 +0100
+
+pandas (1.0.5+dfsg-1) unstable; urgency=medium
+
+  * Upstream bugfix release.  Refresh patches, contributors_list.
+  * Fix invalid test xfails.
+  * Only Recommend numba on amd64, to reduce the risk of bugs.
+  * Don't test-depend on numba on ppc64el (where it crashes, #863511?)
+    or on ports architectures (where it mostly isn't available).
+  * Remove no longer needed test xfails/skips.
+  * Upload to unstable.  (Closes: #950430)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 25 Aug 2020 20:07:50 +0100
+
+pandas (0.25.3+dfsg2-5) unstable; urgency=medium
+
+  * Tests: ignore rounding difference on i386.  (Closes: #968208)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 16 Aug 2020 20:09:14 +0100
+
+pandas (0.25.3+dfsg2-4) unstable; urgency=medium
+
+  * Be compatible with matplotlib 3.3.  (Closes: #966393)
+  * Docs: fix broken remote->local Javascript replacement.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 09 Aug 2020 22:11:25 +0100
+
+pandas (0.25.3+dfsg2-3) unstable; urgency=medium
+
+  * Nested DataFrames may raise ValueError with numpy 1.19
+    (upstream bug 32289).  Clarify error message and xfail tests.
+  * Stop using a no-longer-existing numpy constant.
+  * Tests: ignore deprecations/rewordings and avoid setup exception
+    with numpy 1.19.  (Together, the above Closes: #963817)
+  * Bump debhelper compat to 13.
+  * Fix HDFStore.flush (part of #877419) on s390x.
+  * Add NEWS.html.gz for Standards-Version 4.5.0.
+    (Choosing not to also add NEWS.gz as it would be large.)
+  * Tests: accept Hurd's errno and lack of multiprocessing.
+  * Docs: replace embedded Javascript copies with links.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 28 Jun 2020 21:47:22 +0100
+
+pandas (1.0.4+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.  (Closes: #962335)
+  * Refresh patches, update contributors_list.
+  * Fix broken tests.
+  * Tests: allow numba to raise an error on 32 bit systems.
+  * Don't test-depend on numba on armel (where it crashes,
+    possibly #863508) or ppc64/riscv64 (where it isn't available).
+  * Xfail some more HDF5 tests on big-endian architectures.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Tue, 09 Jun 2020 22:19:23 +0100
+
+pandas (0.25.3+dfsg2-2) unstable; urgency=medium
+
+  * Tests: don't fail on jedi deprecation warnings.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 07 May 2020 11:57:06 +0100
+
+pandas (1.0.3+dfsg2-1) experimental; urgency=medium
+
+  * Upstream bugfix release, update contributors_list.
+  * Merge packaging from unstable,
+    but omit no longer needed clipboard warn/xfail.
+  * Only show the NaN -> datetime warning from float dtypes
+    (to avoid an exception while trying to check).
+  * Recommend numba, as we now have a recent enough version.
+  * Re-add dask test-dependency.
+  * Clarify non-x86 warnings, remove no longer needed xfails / ignores.
+  * Clean up whitespace and patch names/descriptions.
+  * Remove patches no longer needed.
+  * Network tests: use more stable URLs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 06 May 2020 17:07:44 +0100
+
+pandas (0.25.3+dfsg2-1) unstable; urgency=medium
+
+  * Remove inconveniently licensed (CC-BY-SA) snippets.
+  * Fix (harmless) SyntaxWarning on install.  (Closes: #956021)
+  * Fix NaT sort order and test failures with numpy 1.18.
+    (Closes: #958531)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 06 May 2020 12:18:23 +0100
+
+pandas (0.25.3+dfsg-9) unstable; urgency=medium
+
+  * Don't raise on import without matplotlib installed.  Add test of this.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 03 Apr 2020 21:56:02 +0100
+
+pandas (0.25.3+dfsg-8) unstable; urgency=medium
+
+  * Tests: don't fail on harmless changes in dependencies.  (Closes: #954647)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Thu, 02 Apr 2020 18:53:32 +0100
+
+pandas (0.25.3+dfsg-7) unstable; urgency=medium
+
+  * Fix another test failure due to our warnings.
+  * Skip rather than xfail crashing tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 26 Feb 2020 18:45:58 +0000
+
+pandas (0.25.3+dfsg-6) unstable; urgency=medium
+
+  * Don't fail tests on our own warnings.
+  * Xfail some more HDF tests on non-x86 architectures.
+  * Warn that clipboard I/O is broken on big-endian architectures
+    and xfail test.
+  * Use pytest-forked to isolate (already xfailed) crashing test.
+  * Xfail tests that use no-longer-existing URLs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 26 Feb 2020 07:40:25 +0000
+
+pandas (0.25.3+dfsg-5) unstable; urgency=medium
+
+  * Backport packaging from experimental:
+    - Remove unnecessary test skips, and reorganize remaining ones.
+    - Use xfails instead of skips.
+    - Add warnings for the known non-x86 breakages
+      (NaN -> datetime #877754, HDF and Stata I/O #877419).
+    - Tell I/O tests where to find the source tree's test data
+      instead of skipping them.
+    - Stop using deprecated envvar/tag names.
+    - Use https for links where available.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 24 Feb 2020 22:38:26 +0000
+
+pandas (1.0.1+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.
+  * Refresh patches.
+  * Update and sort d/copyright, update contributors_list.
+  * Re-enable checking the test suite.
+  * Declare transition Breaks (see #950430).
+  * Add jinja2 recommends/test-depends.
+  * Fix test_to_numpy failure on big-endian systems.
+  * Register documentation in doc-base.  (Closes: #879226)
+  * Remove no longer needed test xfails/skips,
+    and reorganize the remaining ones.
+  * Tell I/O tests where to find the source tree's test data
+    instead of skipping them.
+  * Enable multiarch.
+  * Temporarily drop dask test-dependency to avoid uninstallability.
+  * Add warnings for the known non-x86 breakages
+    (NaN -> datetime #877754, HDF and Stata I/O #877419).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 23 Feb 2020 17:13:08 +0000
+
+pandas (1.0.0+dfsg-1) experimental; urgency=medium
+
+  * New upstream release.
+  * Upload to experimental, as this is an API break (see #950430).
+  * Drop patches applied upstream, refresh others.
+  * Update and improve d/copyright, update contributors_list.
+  * Xfail a test that fails in the C locale.
+  * Update and organize depends/recommends.
+  * Docs: use a sphinx theme we have, fix spelling,
+    link to rather than embed remote resource,
+    use https links where available.
+  * Stop using deprecated envvar/tag names.
+  * Xfail rather than skip previously broken tests,
+    and put the condition in the patch not d/rules or d/tests.
+  * Remove no longer used patch-stamp.
+  * Temporarily ignore the test suite to get a first build.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 02 Feb 2020 21:04:36 +0000
+
+pandas (0.25.3+dfsg-4) unstable; urgency=medium
+
+  * No-change upload to unstable.  (Closes: #937236, #931557)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 10 Nov 2019 16:35:41 +0000
+
+pandas (0.25.3+dfsg-3) experimental; urgency=medium
+
+  * Fix autopkgtest.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 09 Nov 2019 10:29:47 +0000
+
+pandas (0.25.3+dfsg-2) experimental; urgency=medium
+
+  * Split up the test suite to fit in memory on mipsel,
+    and stop ignoring it there.  (Closes: #943732)
+  * Reproducibility: use correct path for stripping docs.
+  * Declare transition Breaks (see #931557).
+  * Tests: ignore warning from Python 3.8.
+  * Update d/copyright (some files have moved).
+  * Use local requirejs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 08 Nov 2019 07:56:16 +0000
+
+pandas (0.25.3+dfsg-1) experimental; urgency=medium
+
+  * Upstream bugfix release.
+  * Drop patch no longer needed.
+  * Update autopkgtest dependencies, drop unused link.
+  * Better document test skips, remove unnecessary ones.
+  * Reproducibility: strip timestamps and build paths,
+    use fixed random seeds for building documentation.
+  * Ignore test suite on mipsel.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 02 Nov 2019 22:26:31 +0000
+
+pandas (0.25.2+dfsg-2) experimental; urgency=medium
+
+  * Correct path for contributors list, and don't fail when
+    not building the -doc package.
+  * Try again to fix test failure due to deb_nonversioneer_version.
+  * Skip some failing tests on non-Intel (see #943732),
+    require other tests to pass.
+  * Fix another typo.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 28 Oct 2019 22:06:10 +0000
+
+pandas (0.25.2+dfsg-1) experimental; urgency=medium
+
+  [ Graham Inggs ]
+  * Skip python2 test_register_by_default on s390x
+  * Fix python2 test failures in certain locales
+
+  [ Yaroslav Halchenko ]
+  * Recent upstream release
+  * Updated patches
+  * Adjusted for the gone ci/print_versions
+  * d/control
+    - added python{,3}-hypothesis to b-depends
+
+  [ Rebecca N. Palmer ]
+  * New upstream release.
+  * Upload to experimental, as this is an API break (see #931557).
+  * Drop patches fixed upstream, refresh others.
+  * Remove Python 2 packages (see #937236).
+  * Use Python 3 in shebangs and subprocess calls.
+  * Re-enable building on Python 3.8.
+  * Use the new location of print_versions.
+  * Skip feather tests and remove build-dependency:
+    they now need pyarrow.feather, which isn't in Debian.
+  * Don't fail tests for our versioneer removal
+    or a differently worded error message.
+  * Add/update minimum dependency versions.
+  * Add numpydoc, nbconvert and pytest-xdist build-depends.
+  * Update d/copyright.
+  * Pre-generate a contributor list to avoid needing the git log
+    at build time (when it won't exist).
+  * Allow tests to fail for now.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 28 Oct 2019 07:53:21 +0000
+
+pandas (0.23.3+dfsg-8) unstable; urgency=medium
+
+  * Examples dependencies: re-add statsmodels and xarray;
+    also add rpy2 and feather.
+  * Use packaged intersphinx indexes.  (Closes: #876417)
+  * Use https for intersphinx links.
+  * Remove cythonized-files*.  (They are regenerated on each build.)
+  * Remove test xfail, as statsmodels has now been fixed.
+  * Set Rules-Requires-Root: no.
+  * Make documentation Suggest the Python 3 version.
+  * Suggest statsmodels.
+  * Only use Python 3 sphinx, and mark it -Indep/nodoc.
+  * Bump debhelper compat to 12 and use debhelper-compat and pybuild.
+  * Remove pycompat and X-Python*-Version.
+  * Add missing d/copyright item.
+  * Remove obsolete TODOs.
+  * Clarify descriptions.
+  * Stop referring to examples that no longer exist.
+  * Fix typos.
+  * Remove old (no longer used) EXCLUDE_TESTS*.
+  * Deduplicate documentation files.
+  * Use Python 3 shebangs, and fix broken shebang.
+  * Add python3-ipykernel, -ipywidgets, -seaborn to
+    Build-Depends-Indep.
+  * Disable dh_auto_test: it fails, and we run the tests elsewhere.
+  * Mark test dependencies nocheck/nodoc.
+  * Remove old minimum versions / alternative dependencies.
+  * Build-depend on dh-python.
+  * Don't build on python3.8, as it will fail tests (see #931557).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sun, 27 Oct 2019 11:38:37 +0000
+
+pandas (0.23.3+dfsg-7) unstable; urgency=medium
+
+  * Revert test patch and use an xfail instead.
+  * Temporarily drop statsmodels+xarray Build-Depends, as they are
+    uninstallable until this is built.
+  * Add python3-xarray to autopkgtest Depends.
+  * Drop Python 2 autopkgtest (but keep build-time test).
+  * Remove duplicate Recommends.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 20 Sep 2019 08:01:37 +0100
+
+pandas (0.23.3+dfsg-6) unstable; urgency=medium
+
+  * Team upload
+  * Avoid FTBFS with statsmodels 0.9.0
+  * Add python3-statsmodels to autopkgtest Depends
+
+ -- Graham Inggs <ginggs@debian.org>  Wed, 18 Sep 2019 13:46:01 +0000
+
+pandas (0.23.3+dfsg-5) unstable; urgency=medium
+
+  * Team upload
+  * Add locales-all to Build-Depends and autopkgtest Depends in order to
+    consistently test in all available locales
+  * Add crh_UA to skip_noencoding_locales.patch
+  * Fix wrong debian/source/options exclude, thanks Steve Langasek
+
+ -- Graham Inggs <ginggs@debian.org>  Wed, 18 Sep 2019 05:57:44 +0000
+
+pandas (0.23.3+dfsg-4) unstable; urgency=medium
+
+  * Add self to Uploaders.
+  * Recommend .xls format support also in Python 3.  (Closes: #880125)
+  * Tests: don't call fixtures, as this is an error in pytest 4+.
+  * Don't test datetime in locales with no encoding.
+    (These are broken by a Python stdlib bug.)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 14 Sep 2019 16:37:43 +0100
+
+pandas (0.23.3+dfsg-3) unstable; urgency=medium
+
+  * Team upload.
+  * Make np.array @ Series act the right way round.  (Closes: #923708)
+  * Replace #918206 fix with a fix that doesn't change the return type
+    and inplace-ness of np.array += DataFrame.  (Closes: #923707)
+  * Fix missing page in documentation.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Wed, 06 Mar 2019 22:19:34 +0000
+
+pandas (0.23.3+dfsg-2) unstable; urgency=medium
+
+  * Team upload.
+  * Don't fail the build on +dfsg versions.
+  * Fix another d/copyright issue.
+  * Add d/upstream/metadata.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Sat, 02 Mar 2019 14:57:12 +0000
+
+pandas (0.23.3+dfsg-1) unstable; urgency=medium
+
+  * Team upload.
+  * Fix DataFrame @ np.array matrix multiplication.  (Closes: #918206)
+  * Fix documentation build (Sphinx now defaults to Python 3).
+    (Closes: #804552, LP: #1803018)
+  * Add documentation examples dependencies.
+  * Update d/copyright.
+  * Remove unlicensed files.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Fri, 01 Mar 2019 23:02:18 +0000
+
+pandas (0.23.3-1) unstable; urgency=medium
+
+  * New upstream release
+  * debian/patches
+    - many upstreamed patches are removed and others refreshed
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 28 Jul 2018 00:39:32 -0400
+
+pandas (0.22.0-8) unstable; urgency=medium
+
+  * Team Upload.
+  * patches:
+    + Add patch: deb_dont_call_py2_in_py3_test.patch
+      During the python3 unit tests, the command 'python' is called by one
+      of the tests.  When there is no python2 installation, tests such as
+      autopkgtest would fail.
+    * Put the conditionally applied patch into a comment in series to avoid
+      lintian W: patch-file-present-but-not-mentioned-in-series.
+  * Trying to fix the autopkgtest:
+    + Leave a comment about the way to run unittest in the test control file.
+    + Synchronize B-D and autopkgtest depends.
+    + Allow output to stderr during test.
+    * Switch from nosetest to pytest.
+    * Synchronize pytest argument for rules and autopkgtest.
+    - Replace tests/unittest with a symlink pointing to tests/unittest3.
+      That script is smart enough to tell py2 from py3, so we won't
+      need to write the same thing twice.
+    - Filter out intel tests on non-x86 architectures.
+    - Only enable "slow" tests on the (Debian + x86) tester: "slow" tests may
+      consume too much memory, causing a memory error or triggering the OOM killer.
+  * control:
+    + Add missing python3 dependencies and sort the B-D list.
+    * Point Vcs-* fields to Salsa.
+    * Update Homepage to https://pandas.pydata.org/ .
+  * rules:
+    * Reverse the architecture filtering logic.
+    * Disable "slow" tests during build for non-x86 architectures.
+      This may significantly reduce the build time on those weak architectures.
+    * Don't specify the pytest marker expression twice.
+      The first expression will be overridden.
+    * Fix hardening flags.
+    - Clean up the mess of unused nosetest exclusion expressions.
+  * Update lintian overrides.
+    + Override source-is-missing error, which is a false-positive triggered
+      by insane-line-length-in-source-file.
+    + Override insane-line-length-in-source-file because we have nothing
+      to do with lengthy lines in HTML.
+  * TODO: Point out that the unittest speed can be boosted with pytest-xdist.
+
+ -- Mo Zhou <cdluminate@gmail.com>  Sun, 17 Jun 2018 16:01:16 +0000
+
+pandas (0.22.0-7) unstable; urgency=medium
+
+  * Team Upload.
+
+  [ Mo Zhou ]
+  * Remove patch: deb_fix_test_failure_test_basic_indexing, which is
+    unneeded for pandas >= 0.21. (Closes: #900061)
+
+  [ Graham Inggs ]
+  * Add riscv64 to the list of "not intel" architectures
+  * Update mark_tests_working_on_intel_armhf.patch
+
+ -- Graham Inggs <ginggs@debian.org>  Tue, 29 May 2018 13:50:59 +0000
+
+pandas (0.22.0-6) unstable; urgency=medium
+
+  * Team upload
+  * Fix FTBFS with Sphinx 1.7, thanks Dmitry Shachnev!
+
+ -- Graham Inggs <ginggs@debian.org>  Tue, 24 Apr 2018 19:09:20 +0000
+
+pandas (0.22.0-5) unstable; urgency=medium
+
+  * Team upload
+  * Add compatibility with Matplotlib 2.2 (Closes: #896673)
+
+ -- Graham Inggs <ginggs@debian.org>  Mon, 23 Apr 2018 13:56:12 +0000
+
+pandas (0.22.0-4) unstable; urgency=medium
+
+  * Team upload
+  * Fix more tests expecting little-endian results
+  * Fix heap corruption in read_csv on 32-bit, big-endian architectures
+    (Closes: #895890)
+
+ -- Graham Inggs <ginggs@debian.org>  Sun, 22 Apr 2018 21:48:27 +0000
+
+pandas (0.22.0-3) unstable; urgency=medium
+
+  * Team upload
+  * Refresh and re-enable mark_tests_working_on_intel.patch
+  * Fix test__get_dtype tests expecting little-endian results
+
+ -- Graham Inggs <ginggs@debian.org>  Thu, 12 Apr 2018 11:04:21 +0000
+
+pandas (0.22.0-2) unstable; urgency=medium
+
+  * debian/patches
+    - as upstream moved over to pytest from nose, no more nose imports were
+      in the code. Just adjusted patches to import nose where needed
+  * debian/rules
+    - specify LC_ALL=C locale to avoid crash while building docs
+    - add the 0001-TST-pytest-deprecation-warnings-GH17197-17253-reversed.patch
+      to the series if building on a system with an old pytest
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 21 Feb 2018 23:44:58 -0500
+
+pandas (0.22.0-1) unstable; urgency=medium
+
+  * Upstream release
+  * debian/patches
+    - refreshed many
+    - updated some
+    - added
+      - up_moto_optional to skip tests requiring moto (#777089)
+      - deb_skip_difffailingtests to skip two failing tests
+        (see https://github.com/pandas-dev/pandas/issues/19774)
+      - up_xlwt_optional to skip a test requiring xlwt
+      - deb_ndsphinx_optional to make nbsphinx optional.
+        Make nbsphinx not required in build-depends on systems with
+        older python-sphinx
+      - mark_tests_failing_on_386.patch
+        see https://github.com/pandas-dev/pandas/issues/19814
+    - removed (adopted upstream):
+      - dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch
+      - up_tst_np_argsort_comparison2
+    - disabled for now:
+      - mark_tests_working_on_intel.patch
+      - up_tst_dont_assert_that_a_bug_exists_in_numpy
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 21 Feb 2018 10:30:06 -0500
+
+pandas (0.20.3-11) unstable; urgency=medium
+
+  * Team upload.
+  * Cherry-pick upstream commit 5f2b96bb637f6ddeec169c5ef8ad20013a03c853
+    to workaround a numpy bug. (Closes: #884294)
+    + patches/up_tst_dont_assert_that_a_bug_exists_in_numpy
+  * Cherry-pick upstream commits to fix test failure caused by test_argsort().
+    + patches/up_tst_np_argsort_comparison2
+  * Workaround test failure of test_basic_indexing() in file
+    pandas/tests/series/test_indexing.py .
+    + patches/deb_fix_test_failure_test_basic_indexing
+
+ -- Mo Zhou <cdluminate@gmail.com>  Sat, 20 Jan 2018 09:00:31 +0000
+
+pandas (0.20.3-10) unstable; urgency=medium
+
+  * Team upload.
+  * Exclude more tests failing on mips, armhf and powerpc
+
+ -- Andreas Tille <tille@debian.org>  Tue, 24 Oct 2017 21:26:02 +0200
+
+pandas (0.20.3-9) unstable; urgency=medium
+
+  * Team upload.
+  * Add missing "import pytest" to two patched tests
+  * Secure URI in watch file
+
+ -- Andreas Tille <tille@debian.org>  Tue, 24 Oct 2017 08:18:54 +0200
+
+pandas (0.20.3-8) unstable; urgency=medium
+
+  * Team upload.
+  * Exclude one more test and de-activate non-working ignore of test errors
+
+ -- Andreas Tille <tille@debian.org>  Mon, 23 Oct 2017 21:32:24 +0200
+
+pandas (0.20.3-7) unstable; urgency=medium
+
+  * Team upload.
+  * debhelper 9
+  * Use Debian packaged mathjax
+  * Do not Recommends python3-six since it is mentioned in Depends
+  * Remove redundant/outdated XS-Testsuite: autopkgtest
+  * Exclude one more test and de-activate non-working ignore of test errors
+
+ -- Andreas Tille <tille@debian.org>  Mon, 23 Oct 2017 17:33:55 +0200
+
+pandas (0.20.3-6) unstable; urgency=medium
+
+  * Team upload.
+  * Ignore test errors on some architectures
+    (Concerns bug #877419)
+  * Remove __pycache__ remnants from testing
+  * Standards-Version: 4.1.1
+  * DEP3 for Google Analytics patch
+  * Complete Google Analytics patch
+
+ -- Andreas Tille <tille@debian.org>  Mon, 23 Oct 2017 09:05:27 +0200
+
+pandas (0.20.3-5) unstable; urgency=medium
+
+  * Make sure remnants of the nose tests will not fail.  That's a pretty
+    stupid patch, since the tests are not using nose any more, only some
+    remaining exceptions.  Hope it will work anyway.
+    (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org>  Mon, 16 Oct 2017 21:57:45 +0200
+
+pandas (0.20.3-4) unstable; urgency=medium
+
+  * Mark those tests @pytest.mark.intel that pass only on Intel architectures
+  * d/rules: try to exclude tests that were marked "intel"
+    (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org>  Sat, 14 Oct 2017 19:49:01 +0200
+
+pandas (0.20.3-3) unstable; urgency=medium
+
+  * Team upload.
+  * Moved packaging from pkg-exppsy to Debian Science
+  * Exclude certain tests on certain architectures
+    (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org>  Fri, 13 Oct 2017 20:52:53 +0200
+
+pandas (0.20.3-2) unstable; urgency=medium
+
+  * debian/control
+    - boosted policy to 4.0.0 (I think we should be ok)
+    - drop statsmodels from build-depends to altogether avoid the circular
+      build-depends (Closes: #875805)
+  * Diane Trout:
+    - Add dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch (Closes: #875807)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 21 Sep 2017 16:11:29 -0400
+
+pandas (0.20.3-1) unstable; urgency=medium
+
+  * Fresh upstream release
+  * debian/patches
+    - updated some, removed changeset*, and disabled possibly fixed upstream
+      ones
+  * debian/{control,rules}
+    - upstream switched to use pytest instead of nose
+    - enabled back all the tests for now
+    - added python-nbsphinx to b-depends, needed for docs
+  * debian/*.install
+    - no .so at the first level of subdirectories, now present on the third
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 10 Jul 2017 20:00:59 -0400
+
+pandas (0.19.2-5.1) unstable; urgency=medium
+
+  * Non-maintainer upload.
+  * Apply patch by Rebecca N. Palmer
+    Closes: #858260
+
+ -- Andreas Tille <tille@debian.org>  Sun, 02 Apr 2017 07:06:36 +0200
+
+pandas (0.19.2-5) unstable; urgency=medium
+
+  * And one more test to skip on non-amd64 -- test_round_trip_valid_encodings
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 12 Jan 2017 13:10:11 -0500
+
+pandas (0.19.2-4) unstable; urgency=medium
+
+  * Exclude few more "plotting" tests on non-amd64 which cause FTBFS
+    on s390
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 12 Jan 2017 11:43:13 -0500
+
+pandas (0.19.2-3) unstable; urgency=medium
+
+  * Brought back changeset_0699c89882133a41c250abdac02796fec84512e8.diff
+    which should resolve tests failures on BE platforms (wasn't yet
+    upstreamed within 0.19.x releases)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 12 Jan 2017 09:44:52 -0500
+
+pandas (0.19.2-2) unstable; urgency=medium
+
+  * Exclude a number of tests while running on non-amd64 platforms
+    due to bugs in numpy/pandas
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 11 Jan 2017 12:13:05 -0500
+
+pandas (0.19.2-1) unstable; urgency=medium
+
+  * Fresh upstream minor release -- supposed to be a bugfix, but it interacts
+    with the current numpy beta (1:1.12.0~b1-1), leading to various failed tests
+  * debian/patches
+    - changeset_ae6a0a51cf41223394b7ef1038c210045d486cc8.diff
+      to guarantee the same Series dtype from cut regardless of architecture
+    - up_buggy_overflows
+      a workaround for inconsistent overflows when doing the pow operation
+      on big ints
+  * debian/rules
+    - exclude more tests whose failures are due to known issues in the numpy
+      beta and thus not to be addressed directly in pandas
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 04 Jan 2017 10:19:52 -0500
+
+pandas (0.19.1+git174-g81a2f79-1) experimental; urgency=medium
+
+  * New upstream snapshot from v0.19.0-174-g81a2f79
+    - lots of bugfixes since 0.19.1, so decided to test snapshot
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 10 Dec 2016 22:43:19 -0500
+
+pandas (0.19.1-3) unstable; urgency=medium
+
+  * Require cython >= 0.23 or otherwise use pre-cythoned sources
+    (should resolve https://github.com/pandas-dev/pandas/issues/14699
+    on jessie)
+  * debian/control
+    - Build-Conflicts with python-tables 3.3.0-4 since that one leads to FTBFS
+    - boosted policy to 3.9.8
+  * debian/rules
+    - Exclude a few more tests which fail on big-endian and other platforms:
+      test_(msgpack|read_dta18)
+  * debian/patches
+    - changeset_0699c89882133a41c250abdac02796fec84512e8.diff
+      to compare in the tests against native endianness
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 09 Dec 2016 15:49:50 -0500
+
+pandas (0.19.1-2) unstable; urgency=medium
+
+  * debian/control
+    - Moved statsmodels build-depend (optional) under build-depends-indep
+      to break circular dependency.  Thanks Stuart Prescott for the analysis
+  * debian/patches/
+    - changeset_1309346c08945cd4764a549ec63cf51089634a45.diff
+      to not mask problem reading json leading to use of undefined variable
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 27 Nov 2016 21:49:40 -0500
+
+pandas (0.19.1-1) unstable; urgency=medium
+
+  * Fresh upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 18 Nov 2016 12:19:54 -0500
+
+pandas (0.19.0+git14-ga40e185-1) unstable; urgency=medium
+
+  * New upstream post-release snapshot (includes some bugfixes)
+  * debian/patches
+    - dropped changeset_ and up_ patches adopted upstream, refreshed the rest
+  * debian/rules,patches
+    - save the debian-based version into __version.py, so it doesn't conflict
+      with upstream tests of the public API
+    - exclude for now test_expressions on python3
+      (see https://github.com/pydata/pandas/issues/14269)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 13 Oct 2016 10:26:18 -0400
+
+pandas (0.18.1-1) unstable; urgency=medium
+
+  * Fresh upstream release
+  * debian/patches/
+    - changeset_46af7cf0f8e0477f6cc7454aa786a573228f0ac3.diff
+      to also allow an AttributeError exception to be thrown in the tests
+      (Closes: #827938)
+    - debian/patches/deb_skip_test_precision_i386
+      removed (upstreamed)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 13 Jul 2016 10:42:00 -0400
+
+pandas (0.18.0+git114-g6c692ae-1) unstable; urgency=medium
+
+  * debian/control
+    - added python{,3}-pkg-resources to direct Depends for the packages
+      (Closes: #821076)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 17 Apr 2016 20:49:25 -0400
+
+pandas (0.17.1-3) unstable; urgency=medium
+
+  * debian/tests/unittest*
+    - set LC_ALL=C.UTF-8 for the test runs to prevent failure of test_set_locale
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 08 Dec 2015 08:31:30 -0500
+
+pandas (0.17.1-2) unstable; urgency=medium
+
+  * debian/control
+    - make -statsmodels and -tables optional build-depends on those platforms
+      where they are N/A atm.  Added a build-depends on python3-tables since
+      it is now available
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 06 Dec 2015 12:58:26 -0500
+
+pandas (0.17.1-1) unstable; urgency=medium
+
+  * Fresh upstream bugfix release
+  * debian/rules
+    - fixed deletion of moved-away .so files
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 27 Nov 2015 10:52:49 -0500
+
+pandas (0.17.0+git8-gcac4ad2-2) unstable; urgency=medium
+
+  * Bug fix: also install msgpack/*.so extensions into the -lib packages
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 10 Oct 2015 13:52:54 -0400
+
+pandas (0.17.0+git8-gcac4ad2-1) unstable; urgency=medium
+
+  * New upstream post-release snapshot to pick up a few bugfixes
+    - Started to trigger failures of test_constructor_compound_dtypes and
+      test_invalid_index_types -- disabled those for now, see
+      https://github.com/pydata/pandas/issues/11169
+  * debian/rules
+    - Generate pandas/version.py, if not present, from the upstream version
+      information in debian/changelog (versioneer wouldn't know it, since it
+      relies on git)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 09 Oct 2015 21:35:23 -0400
+
+pandas (0.16.2+git65-g054821d-1) unstable; urgency=medium
+
+  * Fresh upstream post-release snapshot (to pick up recent fixes etc)
+    (Closes: #787432)
+  * debian/{control,rules}
+    - build -doc package (Closes: #660900)
+    - add ipython (or newer alternatives from neurodebian) into
+      Build-Depends-Indep to build docs
+    - add python{,3}-{lxml,html5lib} to Build-Depends and Recommends
+    - use LC_ALL=C.UTF-8 while running tests
+    - exclude also test_set_locale since it fails ATM
+      see https://github.com/pydata/pandas/issues/10471
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 30 Jun 2015 17:26:54 -0400
+
+pandas (0.16.0~rc1-1) experimental; urgency=medium
+
+  * New upstream release candidate
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 13 Mar 2015 14:21:39 -0400
+
+pandas (0.15.2-1) unstable; urgency=medium
+
+  * Fresh upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 11 Dec 2014 09:51:57 -0500
+
+pandas (0.15.1+git125-ge463818-1) unstable; urgency=medium
+
+  * New upstream snapshot from v0.15.1-125-ge463818.
+  * Upload to unstable during the freeze since the previous upload to sid
+    didn't make it into jessie anyway
+  * debian/control
+    - remove the versioned requirement on cython (pre-cythonized code is used
+      with older versions, and there is no longer a need in sid/jessie to
+      enforce a version).  As a consequence -- removed all dsc patches
+      pointing to nocython3-dsc-patch, since they are no longer needed
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 30 Nov 2014 21:09:36 -0500
+
+pandas (0.15.0-2) unstable; urgency=medium
+
+  * debian/control
+    - specify minimal numpy to be 1.7
+  * debian/patches
+    - deb_skip_stata_on_bigendians   skip test_stata again on BE platforms
+    - deb_skip_test_precision_i386   skip test_precision_conversion on 32bit
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 30 Oct 2014 23:09:13 -0400
+
+pandas (0.15.0-1) unstable; urgency=medium
+
+  * New upstream release
+  * debian/control
+    - keep statsmodels and matplotlib from being required on the ports
+      which do not have them
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 26 Oct 2014 11:30:23 -0400
+
+pandas (0.14.1-2) unstable; urgency=medium
+
+  * debian/patches/changeset_314012d.diff
+    - Fix converter test for MPL1.4 (Closes: #763709)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 06 Oct 2014 11:53:42 -0400
+
+pandas (0.14.1-1) unstable; urgency=medium
+
+  * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 10 Jul 2014 23:38:49 -0400
+
+pandas (0.14.0+git393-g959e3e4-1) UNRELEASED; urgency=medium
+
+  * New upstream snapshot from v0.14.0-345-g8cd3dd6
+  * debian/rules
+    - disable running disabled tests to prevent clipboard test failures
+      under kfreebsd kernels
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 07 Jul 2014 12:29:50 -0400
+
+pandas (0.14.0+git213-g741b2fa-1) experimental; urgency=medium
+
+  * New upstream snapshot from v0.14.0-213-g741b2fa.
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 19 Jun 2014 10:30:42 -0400
+
+pandas (0.14.0+git17-g3849d5d-1) unstable; urgency=medium
+
+  * New upstream snapshot from v0.14.0-17-g3849d5d -- resolves a number of
+    bugs which sneaked into the 0.14.0 release and caused FTBFS on some
+    platforms and backports
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 01 Jun 2014 00:54:34 -0400
+
+pandas (0.14.0-1) unstable; urgency=medium
+
+  * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 30 May 2014 08:45:35 -0400
+
+pandas (0.14.0~rc1+git79-g1fa5dd4-1) experimental; urgency=medium
+
+  * New upstream snapshot from v0.14.0rc1-73-g8793356
+  * debian/patches:
+    - dropped cherry-picked changeset_* patches
+    - added deb_disable_googleanalytics
+  * debian/control:
+    - boosted policy compliance to 3.9.5
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 27 May 2014 16:00:00 -0400
+
+pandas (0.13.1-2) unstable; urgency=low
+
+  * debian/patches/changeset_6d56e7300d66d3ba76684334bbb44b6cd0ea9f61.diff
+    to fix FTBFS of statsmodels due to failing tests (Closes: #735804)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 08 Feb 2014 12:46:42 -0500
+
+pandas (0.13.1-1) unstable; urgency=low
+
+  * Fresh upstream release
+  * debian/patches
+    - deb_skip_test_pytables_failure to mitigate an error while testing on
+      amd64 wheezy and ubuntu 12.04
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 04 Feb 2014 12:09:29 -0500
+
+pandas (0.13.0+git464-g15a8ff7-1) experimental; urgency=low
+
+  * Fresh pre-release snapshot
+  * debian/patches
+    - removed all cherry-picked patches (should have been upstreamed)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 29 Jan 2014 21:27:45 -0500
+
+pandas (0.13.0-2) unstable; urgency=low
+
+  * debian/patches
+    - 0001-BLD-fix-cythonized-msgpack-extension-in-setup.py-GH5.patch
+      to resolve an issue with building the C++ Cython extension using
+      pre-generated sources
+    - 0001-Add-division-future-import-everywhere.patch
+      0002-remove-explicit-truediv-kwarg.patch
+      to resolve compatibility issues with elderly Numexpr
+    - 0001-BUG-Yahoo-finance-changed-ichart-url.-Fixed-here.patch
+    - deb_skip_sequencelike_on_armel to prevent FTBFS on armel due to failing
+      test: https://github.com/pydata/pandas/issues/4473
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 03 Jan 2014 23:13:48 -0500
+
+pandas (0.13.0-1) unstable; urgency=low
+
+  * Fresh upstream release
+    - resolved compatibility with matplotlib 1.3 (Closes: #733848)
+  * debian/{control,rules}
+    - use xvfb (added to build-depends together with xauth and xclip)
+      for tests
+    - define http*_proxy to prevent downloads
+    - install .md files, not .rst, for docs -- they were renamed upstream
+    - include .cpp Cython-generated files into debian/cythonized-files*
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 01 Jan 2014 18:08:22 -0500
+
+pandas (0.12.0-2) unstable; urgency=low
+
+  [ Dmitry Shachnev ]
+  * DEP-8 tests improvements:
+    - Use Xvfb for running tests.
+    - Increase verbosity using -v flag.
+    - Fix printing interpreter version in unittests3.
+  * Fix indentation in debian/control.
+
+  [ Yaroslav Halchenko ]
+  * debian/control
+    - place python3-matplotlib ahead of the elderly python-matplotlib (which
+      lacks python3 support), since we now have python3-matplotlib in sid
+  * debian/copyright
+    - go through reported missing copyright/license statements (Closes:
+      #700564).  Thanks Luca Falavigna for the report
+  * debian/rules,patches
+    - exclude the test test_bar_log due to incompatibility with matplotlib
+      1.3.0 (the test was adjusted upstream and will be re-enabled for the
+      new release).
+    - debian/patches/changeset_952c5f0bc433622d21df20ed761ee4cb728370eb.diff
+      adds matplotlib 1.3.0 compatibility
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 14 Sep 2013 20:02:58 -0400
+
+pandas (0.12.0-1) unstable; urgency=low
+
+  * New upstream release:
+    - should address failed tests on 32bit platforms
+  * debian/patches
+    - neurodebian: allow building for jessie with outdated cython
+  * debian/control
+    - build for Python2 >= 2.7 due to some (probably temporary) incompatibilities
+      in tests with 2.6
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 24 Jul 2013 23:29:03 -0400
+
+pandas (0.12.0~rc1+git127-gec8920a-1) experimental; urgency=low
+
+  * New upstream snapshot from origin/master at v0.12.0rc1-127-gec8920a
+    - should address FTBFS due to failing tests on big endians
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 20 Jul 2013 09:23:04 -0400
+
+pandas (0.12.0~rc1+git112-gb79996c-1) experimental; urgency=low
+
+  * Fresh git snapshot of upstream candidate release. Experimental build
+    to verify functioning across the ports.
+  * debian/control
+    - dedented last "paragraph" to break it away from the 2nd one.
+      Thanks Beatrice Torracca for the detailed report (Closes: #712260)
+    - Depends on python-six now
+  * debian/{,tests/}control
+    - added python{,3}-bs4, python-html5lib to Build-Depends for more
+      thorough testing
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 18 Jul 2013 13:15:19 -0400
+
+pandas (0.11.0-2) unstable; urgency=low
+
+  [ Yaroslav Halchenko ]
+  * Upload to unstable -- this upstream release addressed the Cython 0.19
+    compatibility issue (Closes: #710608)
+  * Recommends numexpr
+  * Re-cythonized using Cython 0.19
+
+  [ Dmitry Shachnev ]
+  * debian/tests/unittests3: use nosetests3 instead of nosetests-3.x.
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 03 Jun 2013 11:57:43 -0400
+
+pandas (0.11.0-1) experimental; urgency=low
+
+  * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 23 Apr 2013 22:40:15 -0400
+
+pandas (0.10.1-1) experimental; urgency=low
+
+  * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 22 Jan 2013 13:07:31 -0500
+
+pandas (0.10.0-1) experimental; urgency=low
+
+  * New upstream release
+    - drops python 2.5 support (we are dropping pyversions in favor of
+      X-Python-Version)
+  * debian/patches:
+    - all previous are in upstream now, dropped locally
+    - added -dsc-patches for systems without cython3
+  * debian/control:
+    - added python-statsmodels for extended test coverage
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 17 Dec 2012 12:27:25 -0500
+
+pandas (0.9.1-2) unstable; urgency=low
+
+  [ Julian Taylor ]
+  * Provide python3 packages
+  * Add autopkgtests
+  * debian/patches:
+    - relax-float-tests.patch:
+      replace float equality tests with almost-equal comparisons
+    - fix-endian-tests.patch:
+      patch from upstream to fix the test failure on big endian machines
+
+  [ Yaroslav Halchenko ]
+  * Upload to unstable
+  * Dropping pysupport
+  * debian/rules:
+    - slight reduction of code duplication between python 2 and 3
+    - cythonize for both python 2 and 3 into separate directories
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sat, 01 Dec 2012 22:57:47 -0500
+
+pandas (0.9.1-1) experimental; urgency=low
+
+  * New upstream release
+  * Boosted policy to 3.9.3 (no changes needed)
+  * debian/rules
+    - Fixed up the cleanup of cythonized files
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Wed, 14 Nov 2012 09:44:14 -0500
+
+pandas (0.9.0-1) experimental; urgency=low
+
+  * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 07 Oct 2012 21:26:23 -0400
+
+pandas (0.9.0~rc2-1) experimental; urgency=low
+
+  * New upstream release candidate
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 21 Sep 2012 10:27:52 -0400
+
+pandas (0.8.1-1) unstable; urgency=low
+
+  * Primarily a bugfix upstream release.
+  * up_tag_yahoo_test_requiring_network patch removed.
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 22 Jul 2012 20:13:16 -0400
+
+pandas (0.8.0-2) unstable; urgency=medium
+
+  * up_tag_yahoo_test_requiring_network patch cherry-picked from upstream
+    GIT so that tests would not be exercised at package build time
+    (Closes: #681449)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 13 Jul 2012 08:54:41 -0400
+
+pandas (0.8.0-1) unstable; urgency=low
+
+  * Fresh upstream release
+  * debian/control
+    - drop python-statsmodels from Build-Depends since it might not yet be
+      available on some architectures and is not critical for the tests
+    - recommend python-statsmodels instead of deprecated
+      python-scikits.statsmodels
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Fri, 29 Jun 2012 13:02:28 -0400
+
+pandas (0.8.0~rc2+git26-g76c6351-1) experimental; urgency=low
+
+  * Fresh upstream release candidate
+    - all patches dropped (upstreamed)
+    - requires numpy >= 1.6
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 12 Jun 2012 13:23:27 -0400
+
+pandas (0.7.3-1) unstable; urgency=low
+
+  * Fresh upstream release
+    - a few post-release patches (submitted upstream) to exclude unittests
+      requiring network access
+  * debian/control:
+    - python-openpyxl, python-xlwt, python-xlrd into Build-Depends
+      and Recommends
+  * debian/rules:
+    - exclude running tests marked with @network
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 12 Apr 2012 11:27:31 -0400
+
+pandas (0.7.1+git1-ga2e86c2-1) unstable; urgency=low
+
+  * New upstream release plus the bugfix which followed it
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Thu, 01 Mar 2012 22:28:10 -0500
+
+pandas (0.7.0-1) unstable; urgency=low
+
+  * New upstream release
+  * Updated pre-cythonized .c files for older Debian/Ubuntu releases.
+    Added a stamp file with the upstream version to ensure the generated
+    files stay up to date
+  * Dropped all exclusions of unittests and patches -- shouldn't be necessary
+    any longer
+  * Build only for requested versions (not all supported) of Python
+  * Do nothing for the build operation; rely on the overloaded install
+    (to avoid undesired re-cythonization on elderly Ubuntus)
+  * Adjusted the URL in debian/watch due to migration of the repository
+    under the pydata organization
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Mon, 16 Jan 2012 19:31:50 -0500
+
+pandas (0.6.1-1) UNRELEASED; urgency=low
+
+  * New upstream release
+  * python-tk into Build-Depends
+  * Create matplotlibrc with "backend: Agg" to allow tests to run without
+    $DISPLAY
+  * Carry pre-cythonized .c files for systems with older Cython
+  * Skip a few tests known to fail
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 13 Dec 2011 18:36:11 -0500
+
+pandas (0.5.0+git7-gcf32be2-1) unstable; urgency=low
+
+  * New upstream release with post-release fixes
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 01 Nov 2011 21:15:06 -0400
+
+pandas (0.4.3-1) unstable; urgency=low
+
+  * New upstream release(s): primarily bugfixes and optimizations, but also
+    some minor API changes and new functionality
+  * Adjusted debian/watch to match the new layout on github
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 18 Oct 2011 11:27:50 -0400
+
+pandas (0.4.1-1) unstable; urgency=low
+
+  * New upstream bugfix release
+    - incorporated all debian/patches
+  * debian/rules: 'clean' removes generated pandas/version.py
+  * debian/copyright: adjusted to become DEP-5 compliant
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Sun, 25 Sep 2011 21:48:30 -0400
+
+pandas (0.4.0-1) unstable; urgency=low
+
+  * Initial Debian release (Closes: #641464)
+
+ -- Yaroslav Halchenko <debian@onerussian.com>  Tue, 13 Sep 2011 12:24:05 -0400
diff --git a/contributors_list.txt b/contributors_list.txt
new file mode 100644 (file)
index 0000000..158b808
--- /dev/null
@@ -0,0 +1,3404 @@
+Current to version 2.2.3 (generated by git shortlog -ns)
+There may be multiple entries for the same person if they have used more than one (form of their) name
+https://github.com/pandas-dev/pandas/graphs/contributors
+
+  4765 jbrockmendel
+  3130 Wes McKinney
+  3043 jreback
+  1690 Jeff Reback
+  1350 Joris Van den Bossche
+  1174 Matthew Roeschke
+   943 y-p
+   867 Patrick Hoefler
+   758 Tom Augspurger
+   630 Chang She
+   622 Simon Hawkins
+   607 Phillip Cloud
+   464 gfyoung
+   344 sinhrks
+   327 Adam Klein
+   327 Richard Shadrach
+   297 William Ayd
+   278 Terji Petersen
+   246 Marc Garcia
+   222 Thomas Li
+   205 Luke Manley
+   202 Torsten Wörtwein
+   197 Marco Edward Gorelli
+   196 MomIsBestFriend
+   178 patrick
+   168 Daniel Saxton
+   160 Andy Hayden
+   148 topper-123
+   146 Fangchen Li
+   140 Lumberbot (aka Jack)
+   128 Jeff Tratner
+   122 Jeffrey Tratner
+   119 attack68
+   115 Marco Gorelli
+   113 Dea María Léon
+   113 Natalia Mokeeva
+   109 Vytautas Jancauskas
+   102 Matthew Zeitlin
+    93 Jeremy Schendel
+    93 Pietro Battiston
+    89 Kaiqi Dong
+    89 Skipper Seabold
+    86 alimcmaster1
+    85 h-vetinari
+    84 JHM Darbyshire
+    84 Stephan Hoyer
+    80 Kevin Sheppard
+    76 Wouter Overmeire
+    74 Andrew Wieteska
+    70 jschendel
+    66 Maxim Ivanov
+    65 Sinhrks
+    62 Martin Winkel
+    61 TomAugspurger
+    60 Mortada Mehyar
+    57 Christopher Whelan
+    55 Chris
+    55 Thomas Kluyver
+    54 behzad nouri
+    52 Irv Lustig
+    49 Ka Wo Chen
+    48 Thierry Moisan
+    43 Kieran O'Mahony
+    42 Dieter Vandenbussche
+    38 Paul Reidy
+    34 chris-b1
+    33 Evan Wright
+    32 Jonas Haag
+    32 bwignall
+    31 Jan Schulz
+    31 Mak Sze Chun
+    30 Shoham Debnath
+    30 partev
+    29 Yaroslav Halchenko
+    29 immerrr
+    28 David Stephens
+    27 Avinash Pancham
+    27 Khor Chean Wei
+    27 Yao Xiao
+    26 Gábor Lipták
+    26 Justin Zheng
+    26 Rajat Subhra Mukherjee
+    26 github-actions[bot]
+    26 realead
+    25 Stephen Lin
+    25 Thein Oo
+    25 rockg
+    24 Erfan Nariman
+    23 Fabian Haase
+    23 Yuanhao Geng
+    22 Licht Takeuchi
+    22 Ming Li
+    22 ganevgv
+    21 Ryan Nazareth
+    21 liang3zy22
+    21 moink
+    20 Alex Kirko
+    20 DSM
+    20 Dan Birken
+    20 lexual
+    19 Abraham Flaxman
+    19 Artemy Kolchinsky
+    18 Ali McMaster
+    18 Farhan Reynaldo
+    18 Kerby Shedden
+    18 Noa Tamir
+    18 Robert de Vries
+    18 Tim Swast
+    18 dependabot[bot]
+    18 pilkibun
+    18 unutbu
+    17 Aidan Feldman
+    17 Anthonios Partheniou
+    17 Nicolas Bonnotte
+    17 Uwe L. Korn
+    16 Jiaxiang
+    16 Matt Roeschke
+    16 Pandas Development Team
+    16 Parfait Gasana
+    16 Saurav Chakravorty
+    16 Zhengbo Wang
+    16 danielballan
+    16 mzeitlin11
+    16 onesandzeroes
+    15 Abdullah İhsan Seçer
+    15 Tobias Brandt
+    15 seth-p
+    14 Christopher C. Aycock
+    14 Dr. Irv
+    14 Gregory Rome
+    14 Maximilian Roos
+    14 Roy Hyunjin Han
+    14 Spencer Lyon
+    14 dataxerik
+    14 locojaydev
+    14 rhshadrach
+    13 Alex Rothberg
+    13 John W. O'Brien
+    13 Jonathan Chambers
+    13 Todd Jennings
+    13 Vaibhav Vishal
+    13 willpeppo
+    12 Albert Villanova del Moral
+    12 Alex Rychyk
+    12 Bharat Raghunathan
+    12 Luca Beltrame
+    12 PKEuS
+    12 Quang Nguyễn
+    12 Richard T. Guy
+    12 Thomas Smith
+    12 Tommy
+    12 Wenhuan
+    12 Xiao Yuan
+    11 Andrew Rosenfeld
+    11 Daniel Himmelstein
+    11 Joe Jevnik
+    11 John Zangwill
+    11 Keith Hughitt
+    11 anmyachev
+    11 proost
+    11 rmhowe425
+    11 terrytangyuan
+    10 Aaron Critchley
+    10 Alexander Ponomaroff
+    10 Andrew Hawyrluk
+    10 Anjali2019
+    10 Dale Jung
+    10 DriesS
+    10 Eric Leung
+    10 Felix Dulys
+    10 Garrett Drapala
+    10 John McNamara
+    10 K.-Michael Aye
+    10 Kendall Masse
+    10 Martin Blais
+    10 Nick Eubank
+    10 Sam Foo
+    10 Shahar Naveh
+    10 cbertinato
+    10 luke396
+     9 3vts
+     9 Damien Garaud
+     9 Daniel Isaac
+     9 Deepyaman Datta
+     9 Guillaume Gay
+     9 Jack Goldsmith
+     9 Jacob Schaer
+     9 Mateusz Sokół
+     9 Michael Marino
+     9 Shao Yang Hong
+     9 Tobias Pitters
+     9 Tong SHEN
+     9 Vasily Litvinov
+     9 Viktor Kerkez
+     9 cleconte987
+     9 poloso
+     9 rebecca-palmer
+     8 Adam J. Stewart
+     8 Aly Sivji
+     8 Brandon Bradley
+     8 Chuck Cadman
+     8 Conrad Mcgee Stocks
+     8 Dr-Irv
+     8 Hugh Kelley
+     8 Joel Nothman
+     8 Kalyan Gokhale
+     8 Mike Kelly
+     8 Oleh Kozynets
+     8 OlivierLuG
+     8 Parthi
+     8 RaisaDZ
+     8 Shawn Heide
+     8 Sylvain Marié
+     8 Thomas A Caswell
+     8 Thomas Grainger
+     8 Varun
+     8 Vyom Pathak
+     8 himanshu awasthi
+     8 iasoon
+     8 jnmclarty
+     8 smij720
+     8 taytzehao
+     8 tpaxman
+     7 Benedikt Sauer
+     7 Chuanzhu Xu
+     7 Dan Allan
+     7 Daniel Schmidt
+     7 Dennis Chukwunta
+     7 Derek McCammond
+     7 Francis T. O'Donovan
+     7 Honfung Wong
+     7 Janosh Riebesell
+     7 Jason Sexauer
+     7 Johannes Mueller
+     7 Kernc
+     7 Kostya Farber
+     7 Matthias Bussonnier
+     7 Michael Mueller
+     7 Nirav
+     7 Robin to Roxel
+     7 Rushabh Vasani
+     7 Samesh Lakhotia
+     7 Stefanie Molin
+     7 Stijn Van Hoey
+     7 Takafumi Arakaki
+     7 William Andrea
+     7 Winterflower
+     7 Younggun Kim
+     7 Yury Mikhaylov
+     7 abonte
+     7 dsm054
+     7 ftrihardjo
+     7 nipunreddevil
+     7 nrebena
+     7 rohanjain101
+     7 scls19fr
+     7 themien
+     7 uzzell
+     6 Aarni Koskela
+     6 ArtinSarraf
+     6 Asish Mahapatra
+     6 Benjamin Rowell
+     6 Bill
+     6 Brad Buran
+     6 Bran Yang
+     6 Brian Wignall
+     6 Chris Whelan
+     6 David Bew
+     6 FactorizeD
+     6 Garrett-R
+     6 Graham Inggs
+     6 Guillaume Lemaitre
+     6 HH-MWB
+     6 Isaac Virshup
+     6 Jeff Carey
+     6 Kashif Khan
+     6 Kelsey Jordahl
+     6 Loic Diridollou
+     6 Mateusz Woś
+     6 Matt Kirk
+     6 Matti Picus
+     6 Micael Jarniac
+     6 Neil Parley
+     6 Nicholaus E. Halecky
+     6 Nick Crews
+     6 Piotr Jucha
+     6 Roman Yurchak
+     6 Rouz Azari
+     6 Sean Patrick Malloy
+     6 Stephen Hoover
+     6 Steve
+     6 Sven
+     6 Thomas Dickson
+     6 Tola A
+     6 Uddeshya Singh
+     6 Vincent Arel-Bundock
+     6 WANG Aiyong
+     6 Wes Turner
+     6 ZhuBaohe
+     6 agijsberts
+     6 agraboso
+     6 ajcr
+     6 clham
+     6 hasan-yaman
+     6 ischwabacher
+     6 kernc
+     6 krajatcl
+     6 mproszewska
+     6 ram vikram singh
+     6 rbenes
+     6 saehuihwang
+     6 seljaks
+     6 smartvinnetou
+     6 timmie
+     6 tmnhat2001
+     5 AG
+     5 Adam Obeng
+     5 Addison Lynch
+     5 Ajay Saxena
+     5 Alan Velasco
+     5 Alexandre Prince-Levasseur
+     5 Ambuj Pawar
+     5 Andrew
+     5 Ben Beasley
+     5 Ben Kandel
+     5 Chapman Siu
+     5 Clark Fitzgerald
+     5 Clemens Brunner
+     5 DataOmbudsman
+     5 David Cottrell
+     5 Douglas McNeil
+     5 Galuh Sahid
+     5 Gjelt
+     5 Guilherme Beltramini
+     5 Janelle Zoutkamp
+     5 Jay
+     5 Joeperdefloep
+     5 Jon Mease
+     5 Joris Vankerschaver
+     5 Josh
+     5 Josh Dimarsky
+     5 Ksenia
+     5 Lars Buitinck
+     5 LeakedMemory
+     5 Linus Sommer
+     5 Loïc Estève
+     5 Luis Pinto
+     5 Marc Abramowitz
+     5 Mark Wiebe
+     5 Max Chen
+     5 Michael Hsieh
+     5 Nicholas Musolino
+     5 Oğuzhan Öğreden
+     5 Pauli Virtanen
+     5 Peter
+     5 Philip
+     5 Prabakaran Kumaresshan
+     5 Prakhar Pandey
+     5 Ralf Gommers
+     5 Sangwoong Yoon
+     5 Scott E Lasley
+     5 Shantanu
+     5 Sumanau Sareen
+     5 Tao He
+     5 Tarbo Fukazawa
+     5 Taylor Packard
+     5 Ted Petrou
+     5 Tim Hoffmann
+     5 Tomaz Berisa
+     5 TrigonaMinima
+     5 Troels Nielsen
+     5 Vamsi Verma
+     5 William Blan
+     5 Xingrong Chen
+     5 Yuichiro Kaneko
+     5 avinashpancham
+     5 benjamin
+     5 dieterv77
+     5 dilex42
+     5 donK23
+     5 fathomer
+     5 kylekeppler
+     5 pre-commit-ci[bot]
+     5 pv8493013j
+     5 tshauck
+     5 yui-knk
+     4 Adam Greenhall
+     4 Ahmed Ibrahim
+     4 Alex Lim
+     4 Alvaro Tejero-Cantero
+     4 Andreas Würl
+     4 Anna Daglis
+     4 Anthony Milbourne
+     4 Armin Varshokar
+     4 Avi Sen
+     4 Benjamin Thyreau
+     4 Blake Hawkins
+     4 Bob Haffner
+     4 Brenda Moon
+     4 Brett Naul
+     4 Brian Hulette
+     4 Caleb Epstein
+     4 Calvin Ho
+     4 Carlotta Fabian
+     4 Chalmer Lowe
+     4 Cheuk Ting Ho
+     4 Chris Billington
+     4 Chris Warth
+     4 Chris Zimmerman
+     4 Christian Hudon
+     4 DaanVanHauwermeiren
+     4 Dan Miller
+     4 Daniel Waeber
+     4 Danil Iashchenko
+     4 Dave Hughes
+     4 David Adrián Cañones Castellano
+     4 Dmitriy
+     4 Dražen Lučanin
+     4 Ekaterina
+     4 Erdi
+     4 Erik
+     4 Fred Reiss
+     4 Gianluca Rossi
+     4 Giftlin Rajaiah
+     4 Goyo
+     4 Gregg Lind
+     4 Henning Sperr
+     4 JMBurley
+     4 Jacques Kvam
+     4 James Myatt
+     4 JennaVergeynst
+     4 Jessica Greene
+     4 Jev Kuznetsov
+     4 Jiang Yue
+     4 Jim Crist
+     4 John Karasinski
+     4 John Zwinck
+     4 Jonathan Shreckengost
+     4 Junya Hayashi
+     4 Kevin
+     4 Kevin Stone
+     4 Krishna
+     4 Kyle Meyer
+     4 Laura Collard, PhD
+     4 Levi Ob
+     4 Lorenzo Maffioli
+     4 Lucas Rodés-Guirao
+     4 Mabel Villalba
+     4 Marian Denes
+     4 Mark Graham
+     4 Matias Heikkilä
+     4 Matt Wittmann
+     4 Matthew Gilbert
+     4 Max van Deursen
+     4 Michael Tiemann
+     4 Nathan Abel
+     4 Nathan Goldbaum
+     4 Nicholas Ver Halen
+     4 Nick Anderson
+     4 OXPHOS
+     4 Olivier Grisel
+     4 Oluokun Adedayo
+     4 Paul Ivanov
+     4 Philippe THOMY
+     4 Qbiwan
+     4 Ram Rachum
+     4 Rob
+     4 Robert Gieseke
+     4 Roger Thomas
+     4 Samuel Sinayoko
+     4 Shane Conway
+     4 Shashwat Agrawal
+     4 Shivam Rana
+     4 Sofiane Mahiou
+     4 Srinivas Reddy Thatiparthy (శ్రీనివాస్  రెడ్డి తాటిపర్తి)
+     4 Stephen Rauch
+     4 Stéphan Taljaard
+     4 Trent Hauck
+     4 Tyler Reddy
+     4 Varun Shrivastava
+     4 Vijay Vaidyanathan
+     4 Vincent La
+     4 Vladimir Filimonov
+     4 Vyomkesh Tripathi
+     4 Wenjun Si
+     4 Will Holmgren
+     4 Yasin Tatar
+     4 Yoshiki Vázquez Baeza
+     4 Zero
+     4 akosel
+     4 alm
+     4 auderson
+     4 cel4
+     4 cgohlke
+     4 chapman siu
+     4 gliptak
+     4 hugo
+     4 junk
+     4 kathleenhang
+     4 kota matsuoka
+     4 luzpaz
+     4 robbuckley
+     4 rosagold
+     4 srotondo
+     4 vangorade
+     4 waitingkuo
+     4 wcwagner
+     3 Aaditya Panikath
+     3 Abdullah Ihsan Secer
+     3 Abhijeet Krishnan
+     3 Adam Hooper
+     3 Aleksey Bilogur
+     3 Alex Alekseyev
+     3 Alex Buzenet
+     3 Alexander Buchkovsky
+     3 Alfonso MHC
+     3 Alp Arıbal
+     3 Amanda Bizzinotto
+     3 Andreas Winkler
+     3 Andrew Wood
+     3 Angelos Evripiotis
+     3 Aniruddha Bhattacharjee
+     3 Anjana S
+     3 Anthony Givans
+     3 Anton I. Sipos
+     3 Baurzhan Muftakhidinov
+     3 BeanNan
+     3 Ben Welsh
+     3 Benjamin Beier Liu
+     3 Benoît Vinot
+     3 Bhavani Ravi
+     3 Big Head
+     3 Boris Rumyantsev
+     3 Brandon M. Burroughs
+     3 Brian Sun
+     3 Brock Mendel
+     3 Carlos Souza
+     3 Chris Bertinato
+     3 Chris Mazzullo
+     3 Christoph Gohlke
+     3 Christos Petropoulos
+     3 DaPy15
+     3 DanielFEvans
+     3 Dave Hirschfeld
+     3 Dave Willmer
+     3 David Krych
+     3 David Poznik
+     3 Devin Petersohn
+     3 Dillon Niederhut
+     3 DimiGrammatikakis
+     3 Doug Latornell
+     3 Dries Schaumont
+     3 EdAbati
+     3 Elle
+     3 Elliot Rampono
+     3 Eric Chea
+     3 Felix Divo
+     3 Francesc Via
+     3 Gabriel Monteiro
+     3 Gesa Stupperich
+     3 Giacomo Ferroni
+     3 Gianluca Ficarelli
+     3 Giftlin
+     3 Grant Smith
+     3 Guillaume Poulin
+     3 Hammad Mashkoor
+     3 Hans
+     3 Haochen Wu
+     3 Harshavardhan Bachina
+     3 Hielke Walinga
+     3 Horace Lai
+     3 Hubert
+     3 Hugues Valois
+     3 Hyukjin Kwon
+     3 Iain Barr
+     3 Ingolf Becker
+     3 Iqrar Agalosi Nureyza
+     3 Isaac Chung
+     3 Israel Saeta Pérez
+     3 Ivan Nazarov
+     3 Jack Bicknell
+     3 Jack Liu
+     3 Jake VanderPlas
+     3 James Cobon-Kerr
+     3 Jan Rudolph
+     3 Jan-Philip Gehrcke
+     3 Jared Groves
+     3 Jarrod Millman
+     3 Jean Helie
+     3 Jean-Mathieu Deschenes
+     3 Jeremy Tuloup
+     3 Jeroen Kant
+     3 Jesper Dramsch
+     3 Jesse Farnham
+     3 Joel Ostblom
+     3 John Freeman
+     3 John Mantios
+     3 John McGuigan
+     3 Joon Ro
+     3 Jordan Hicks
+     3 Josh Friedlander
+     3 Josh Klein
+     3 Josiah Baker
+     3 José Duarte
+     3 José Lucas Mayer
+     3 Julia Signell
+     3 Justin Essert
+     3 Kamil Trocewicz
+     3 Kang Su Min
+     3 Kapil E. Iyer
+     3 Karmel Allison
+     3 Kate Surta
+     3 Keshav Ramaswamy
+     3 Kian Eliasi
+     3 Kim, KwonHyun
+     3 Krishna Chivukula
+     3 Kyle Barron
+     3 Lawrence Mitchell
+     3 Leonardus Chen
+     3 Liam3851
+     3 Louis Huynh
+     3 Luca Pizzini
+     3 MBrouns
+     3 Maren Westermann
+     3 Markus Meier
+     3 Martin Durant
+     3 Martina Oefelein
+     3 Mateusz
+     3 Matheus Felipe
+     3 Mathis Felardos
+     3 Matt Braymer-Hayes
+     3 Matteo Santamaria
+     3 Matthew Brett
+     3 Matus Valo
+     3 Max Bolingbroke
+     3 Maximiliano Greco
+     3 Mike Kutzma
+     3 Miroslav Šedivý
+     3 Mitar
+     3 Mohammad Jafar Mashhadi
+     3 Myles Braithwaite
+     3 Naomi Bonnin
+     3 Nate Yoder
+     3 Nick Pentreath
+     3 Noah
+     3 Noam Hershtig
+     3 Nofar Mishraki
+     3 Pamela Wu
+     3 Patrick O'Brien
+     3 Paul
+     3 Paul Ganssle
+     3 Pedro Nacht
+     3 Philipp A
+     3 Piotr Niełacny
+     3 Pradyumna Rahul
+     3 Prasanjit Prakash
+     3 Prithvijit
+     3 Pulkit Maloo
+     3 Rahul Sathanapalli
+     3 Randy Carnevale
+     3 Ray Bell
+     3 Riccardo Magliocchetti
+     3 Richard Höchenberger
+     3 Rik-de-Kort
+     3 Robin
+     3 Robin Wilson
+     3 Rohith295
+     3 Ryan
+     3 Safia Abdalla
+     3 Sahil Dua
+     3 Sai-Suraj-27
+     3 Salahuddin
+     3 Sanjith Chockan
+     3 Sarthak Vineet Kumar
+     3 SatheeshKumar Mohan
+     3 Scott Harp
+     3 Sebastian Bank
+     3 Shorokhov Sergey
+     3 Siddhartha Gandhi
+     3 Simon Gibbons
+     3 Souvik Mandal
+     3 Sparkle Russell-Puleri
+     3 Stefan Krawczyk
+     3 Stefania Delprete
+     3 Stefanie Senger
+     3 Stefano Cianciulli
+     3 Stelios Petrakis
+     3 Steven Rotondo
+     3 Suvayu Ali
+     3 TLouf
+     3 Tania Allard
+     3 Thijs Damsma
+     3 Thomas Yu
+     3 Tjerk Santegoeds
+     3 Toby Dylan Hocking
+     3 Tom Ajamian
+     3 Tom Bird
+     3 UrielMaD
+     3 Vaibhav K
+     3 Victoria Zdanovskaya
+     3 Yago González
+     3 Yanxian Lin
+     3 Yian
+     3 Zac Hatfield-Dodds
+     3 Zheyuan
+     3 Zito Relova
+     3 abmyii
+     3 adneu
+     3 aflah02
+     3 alphaCTzo7G
+     3 aneesh98
+     3 anomrake
+     3 arredond
+     3 bang128
+     3 beanan
+     3 caneff
+     3 davidshinn
+     3 dengemann
+     3 deponovo
+     3 duozhang
+     3 ehsan shirvanian
+     3 fjetter
+     3 guru kiran
+     3 iofall
+     3 jdeschenes
+     3 jen w
+     3 jnecus
+     3 joshuaar
+     3 linebp
+     3 lukasbk
+     3 mcjcode
+     3 miker985
+     3 nathalier
+     3 neelmraman
+     3 ogiaquino
+     3 omar-elbaz
+     3 paulreece
+     3 prossahl
+     3 raj-thapa
+     3 rekcahpassyla
+     3 ri938
+     3 shawnbrown
+     3 shteken
+     3 shubham11941140
+     3 stonebig
+     3 tehunter
+     3 thatneat
+     3 tushushu
+     3 tv3141
+     3 unknown
+     3 usersblock
+     3 za
+     3 zach powers
+     2 ABCPAN-rank
+     2 Aaron Staple
+     2 Abhishek Mangla
+     2 Achmad Syarif Hidayatullah
+     2 Adam Bull
+     2 Adam Marcus
+     2 Adam Turner
+     2 Adrian Mastronardi
+     2 Ahmad
+     2 Aidan Montare
+     2 Alex Chase
+     2 Alex Gaudio
+     2 Alex Hall
+     2 Alex Volkov
+     2 Alex Watt
+     2 Alex-Gregory-1
+     2 Alexander Michael Schade
+     2 Alexis Mignon
+     2 Aleš Erjavec
+     2 Allen Downey
+     2 Allison Kwan
+     2 Andrew Burrows
+     2 Andrew Chen
+     2 Andy
+     2 Andy R. Terrel
+     2 Angela Ambroz
+     2 Antoine Mazières
+     2 Anton Shevtsov
+     2 Arda Kosar
+     2 Armin Berres
+     2 Artem Bogachev
+     2 Artem Vorobyev
+     2 Ashwin Srinath
+     2 Atsushi Nukariya
+     2 Ayla Khan
+     2 BarkotBeyene
+     2 Batalex
+     2 Ben Alex
+     2 Ben Greiner
+     2 Ben Schiller
+     2 Ben Thayer
+     2 Benjamin Adams
+     2 Bernard Willers
+     2 Bhuvana KA
+     2 Bill Letson
+     2 Bobae Kim
+     2 Boyd Kane
+     2 Bryan Cutler
+     2 Camilo Cota
+     2 Carol Willing
+     2 Chamoun Saoma
+     2 Chandrasekaran Anirudh Bhardwaj
+     2 Chankey Pathak
+     2 Charalampos Papaloizou
+     2 Charles David
+     2 Chris Grinolds
+     2 Chris Lynch
+     2 Chris Reynolds
+     2 Chris Stadler
+     2 Chris Stoafer
+     2 Chris Withers
+     2 ChrisAlbertsen
+     2 Christer van der Meeren
+     2 Christian Chwala
+     2 ChristofKaufmann
+     2 CloseChoice
+     2 Cody
+     2 Compro Prasad
+     2 Damian Kula
+     2 Dan Ringwalt
+     2 Daniel Grady
+     2 Daniel Hrisca
+     2 Daniel I
+     2 Dare Adewumi
+     2 Data & Code Expert Experimenting with Code on Data
+     2 DavaIlhamHaeruzaman
+     2 David Arcos
+     2 David Cook
+     2 David Gwynne
+     2 David Li
+     2 David Stansby
+     2 Deepang Raval
+     2 Dennis J. Gray
+     2 DeviousLab
+     2 Devjeet Roy
+     2 Diane Trout
+     2 Diego Argueta
+     2 Digres45
+     2 Dom
+     2 Doran Deluz
+     2 Doug Davis
+     2 Douglas Hanley
+     2 Douglas Rudd
+     2 Dražen Lučanin
+     2 Drew Seibert
+     2 Dror Atariah
+     2 Edoardo Abati
+     2 Eduardo Schettino
+     2 Egor
+     2 Egor Panfilov
+     2 Eirik
+     2 Elliott Sales de Andrade
+     2 Emiliano Jordan
+     2 Endre Mark Borza
+     2 Eric Chlebek
+     2 Eric Wieser
+     2 EricLeer
+     2 Eve
+     2 Fabian Gabel
+     2 Fabian Gebhart
+     2 Fabian Retkowski
+     2 Fabian Rost
+     2 Fabio Zanini
+     2 Fabrizio Primerano
+     2 Florian Jetter
+     2 Future Programmer
+     2 Fábio Rosado
+     2 Gabriel Corona
+     2 Gabriel Di Pardi Arruda
+     2 Gabriel Tutui
+     2 Gaurav Sheni
+     2 Gautham
+     2 George Hartzell
+     2 Geraint Duck
+     2 Giacomo Caria
+     2 Gina
+     2 Gioia Ballin
+     2 Giovanni Lanzani
+     2 Graham Jeffries
+     2 Grant Roch
+     2 Guillaume Horel
+     2 Guo Ci
+     2 Gustavo C. Maciel
+     2 H L
+     2 Hamed Saljooghinejad
+     2 Hannah Ferchland
+     2 Harald Husum
+     2 Hassan Kibirige
+     2 Henry Kleynhans
+     2 How Si Wei
+     2 HubertKl
+     2 Hugo van Kemenade
+     2 HyunTruth
+     2 Hyungtae Kim
+     2 Ian Eaves
+     2 Ian Henriksen
+     2 Iblis Lin
+     2 Ignacio Santolin
+     2 Ilya V. Schurov
+     2 Ivan Smirnov
+     2 JDkuba
+     2 Jack Kelly
+     2 Jacob Peacock
+     2 Jacopo Rota
+     2 Jaehoon Hwang
+     2 Jaidev Deshpande
+     2 Jaime Di Cristina
+     2 James Draper
+     2 James Lamb
+     2 Jan Koch
+     2 Jan Škoda
+     2 Janus
+     2 Jaume Bonet
+     2 Javad Noorbakhsh
+     2 Jay Parlar
+     2 Jeet Parekh
+     2 Jeff Knupp
+     2 Jeff Mellen
+     2 Jeffrey Gerard
+     2 Jessica M
+     2 Jethro Cao
+     2 Jiawei Zhang
+     2 Jimmy Callin
+     2 Jing Qiang Goh
+     2 Joao Victor Martinelli
+     2 Joaq Almirante
+     2 Joe Bradish
+     2 JohannaTrost
+     2 John David Reaver
+     2 John G Evans
+     2 John Liekezer
+     2 John-Colvin
+     2 Johnny Pribyl
+     2 Jon M. Mease
+     2 Jonas Abernot
+     2 Jonathan deWerd
+     2 Jordi Contestí
+     2 Jose Quinones
+     2 JoseNavy
+     2 Juarez Bochi
+     2 Julia Evans
+     2 Julien Danjou
+     2 Jung Dong Ho
+     2 Justin Bozonier
+     2 Justin Lecher
+     2 Justin McOmie
+     2 KOBAYASHI Ittoku
+     2 Kamil Kisiel
+     2 Kang Yoosam
+     2 Kara de la Marck
+     2 Karthigeyan
+     2 Karthik Mathur
+     2 Kasim Panjri
+     2 Katie Smith
+     2 Katrin Leinweber
+     2 Kee Chong Tan
+     2 Kenny Huynh
+     2 Kevin Anderson
+     2 Kevin Bowey
+     2 Kevin Jan Anker
+     2 KotlinIsland
+     2 Koustav Samaddar
+     2 Kunal Gosar
+     2 Kyle Kelley
+     2 Kyle Prestel
+     2 LJ
+     2 Lars Lien Ankile
+     2 Leif Johnson
+     2 Leif Walsh
+     2 Leo Razoumov
+     2 Linus
+     2 Liwei Cai
+     2 Lorenzo Vainigli
+     2 Luca Scarabello
+     2 Lucas Kushner
+     2 Lucas Scarlato Astur
+     2 Luke
+     2 Mabroor Ahmed
+     2 Mahmoud Lababidi
+     2 Manan Pal Singh
+     2 Manraj Singh
+     2 Marc
+     2 Marco Hemken
+     2 Marco Neumann
+     2 Margaret Sy
+     2 Marko Pacak
+     2 Martin Fleischmann
+     2 Martina G. Vilas
+     2 Mason Gallo
+     2 Mats Maiwald
+     2 Matt Maybeno
+     2 Matt Richards
+     2 Maxim Veksler
+     2 Meghana Varanasi
+     2 Michael
+     2 Michael Charlton
+     2 Michael Odintsov
+     2 Michael Penkov
+     2 Michael Schatzow
+     2 Michael W Schatzow
+     2 Michael Wang
+     2 Michał Górny
+     2 Miguel
+     2 Mike Phung
+     2 Min RK
+     2 Mitch Negus
+     2 Mohamed Amine ZGHAL
+     2 Mohammad Hasnain Mohsin Rajan
+     2 Mohammed Kashif
+     2 Monson Shao
+     2 Muktan
+     2 Natalie Jann
+     2 Nathalie Rud
+     2 Nathan Pinger
+     2 Naveen Michaud-Agrawal
+     2 Nick Chmura
+     2 Nico Cernek
+     2 Nicolas Dickreuter
+     2 Nikhil Choudhary
+     2 Nikhil Kumar Mengani
+     2 Nipun Batra
+     2 Noritada Kobayashi
+     2 Numan Ijaz
+     2 Oleg Shteynbuk
+     2 Olga Matoula
+     2 Oli
+     2 Oliver Hofkens
+     2 Ondrej Kokes
+     2 Ondřej Čertík
+     2 Paddy Mullen
+     2 Pankaj Pandey
+     2 Paolo Lammens
+     2 Paras Gupta
+     2 Patrick Cando
+     2 Patrick O'Keeffe
+     2 Paul Lee
+     2 Paul Sanders
+     2 Pav A
+     2 Pawel Kordek
+     2 Pedro Reys
+     2 Peter Prettenhofer
+     2 Phan Duc Nhat Minh
+     2 Philipp Schaefer
+     2 Pierre Haessig
+     2 Piotr Chromiec
+     2 Piyush Aggarwal
+     2 Prayag Savsani
+     2 Prerana Chakraborty
+     2 Punitvara
+     2 Rafal Skolasinski
+     2 Raghav
+     2 Rahul Chauhan
+     2 Rajib Mitra
+     2 RaphSku
+     2 Ravi Kumar Nimmi
+     2 Rick
+     2 Rinoc Johnson
+     2 Rob Levy
+     2 Robert Bradshaw
+     2 Robert Meyer
+     2 Roei.r
+     2 Roger
+     2 Roger Erens
+     2 Rohit Sanjay
+     2 Roman Pekar
+     2 Ronan Lamy
+     2 Roshni
+     2 Ruan Pretorius
+     2 RuiDC
+     2 Ruijing Li
+     2 Ryan Rehman
+     2 Sachin Yadav
+     2 Sahid Velji
+     2 Sam Cohan
+     2 Sam Rao
+     2 SanthoshBala18
+     2 Sarah Donehower
+     2 Saul Shanabrook
+     2 Sašo Stanovnik
+     2 Scott Lasley
+     2 Scott Sanderson
+     2 Scott Talbert
+     2 Seb
+     2 Sebastian Berg
+     2 Sebastián Vanrell
+     2 Shadi Akiki
+     2 Shahul Hameed
+     2 Shantanu Gontia
+     2 Shawn Zhong
+     2 Sheppard, Kevin
+     2 Shiv Gupta
+     2 Shuangchi He
+     2 Sietse Brouwer
+     2 Simon Knott
+     2 Simon-Martin Schröder
+     2 Simone Basso
+     2 SleepingPills
+     2 Somtochi Umeh
+     2 Soyoun(Rose) Kim
+     2 Stefaan Lippens
+     2 Steffen Rehberg
+     2 Stephen Childs
+     2 Stephen Pascoe
+     2 Stephen Simmons
+     2 Steve Cook
+     2 Steven Schaerer
+     2 SylvainLan
+     2 T. JEGHAM
+     2 Tan Tran
+     2 Tanya Jain
+     2 Tegar D Pratama
+     2 Theodoros Nikolaou
+     2 Thiviyan Thanapalasingam
+     2 Thomas J Fan
+     2 Tiago Antao
+     2 Tiago Requeijo
+     2 Tim D. Smith
+     2 Timon Jurschitsch
+     2 Todd DeLuca
+     2 Tom Aarsen
+     2 Tomoyuki Suzuki
+     2 Tony Lorenzo
+     2 Tony Tao
+     2 Travis N. Vaught
+     2 Tushar Gupta
+     2 Tushar Mittal
+     2 Tux1
+     2 Umberto Fasci
+     2 Valentin Haenel
+     2 Valerii
+     2 Varad Gunjal
+     2 Venaturum
+     2 Victor Villas
+     2 Vikram Bhandoh
+     2 Vipul Rai
+     2 Vu Le
+     2 Vytautas Jančauskas
+     2 WBare
+     2 Waltteri Koskinen
+     2 Warren White
+     2 Wilfred Hughes
+     2 Will Ayd
+     2 Will Furnass
+     2 WillAyd
+     2 Wuraola Oyewusi
+     2 Xbar
+     2 Yan Facai
+     2 Yassir Karroum
+     2 Yimeng Zhang
+     2 Yoav Ram
+     2 Yong Kai Yi
+     2 Yuecheng Wu
+     2 Yutaro Ikeda
+     2 Yvan Gatete
+     2 Yves Delley
+     2 Zach Angell
+     2 Zak Kohler
+     2 adatasetaday
+     2 akittredge
+     2 andresmcneill
+     2 ante328
+     2 bashtage
+     2 bjonen
+     2 bolkedebruin
+     2 broessli
+     2 ccccjone
+     2 cgangwar11
+     2 charalampos papaloizou
+     2 charlogazzo
+     2 chinhwee
+     2 coco
+     2 codamuse
+     2 conquistador1492
+     2 csfarkas
+     2 dahlbaek
+     2 dan1261
+     2 danielplawrence
+     2 dannyhyunkim
+     2 david-liu-brattle-1
+     2 davidwales
+     2 deflatSOCO
+     2 discort
+     2 dlovell
+     2 dwkenefick
+     2 ehallam
+     2 elpres
+     2 eshirvana
+     2 fjdiod
+     2 froessler
+     2 fshi01
+     2 gabrielvf1
+     2 gdex1
+     2 ghasemnaddaf
+     2 gunjan-solanki
+     2 hardikpnsp
+     2 i-aki-y
+     2 iamshwin
+     2 ianzur
+     2 jaimefrio
+     2 jakirkham
+     2 jeschwar
+     2 jfadia
+     2 jlamborn324
+     2 jmholzer
+     2 jmorris0x0
+     2 jonaslb
+     2 joooeey
+     2 joshuabello2550
+     2 keitakurita
+     2 killerontherun1
+     2 kpapdac
+     2 krasch
+     2 krsnik93
+     2 lacrosse91
+     2 llllllllll
+     2 louispotok
+     2 lrjball
+     2 ma3da
+     2 maroth96
+     2 mattip
+     2 michaelws
+     2 michal-gh
+     2 mikeronayne
+     2 mliu08
+     2 mlondschien
+     2 mschmookler
+     2 msund
+     2 mtrbean
+     2 nealxm
+     2 neilkg
+     2 nlepleux
+     2 nsuresh
+     2 nullptr
+     2 ohad83
+     2 ottiP
+     2 paradox-lab
+     2 pedrooa
+     2 phaebz
+     2 priyankjain
+     2 ptype
+     2 qudade
+     2 reidy-p
+     2 rjfs
+     2 rlukevie
+     2 roch
+     2 rtpsw
+     2 rvernica
+     2 ryangilmour
+     2 s-scherrer
+     2 scotthavard92
+     2 springcoil
+     2 srib
+     2 srinivasan
+     2 ssikdar1
+     2 sstiijn
+     2 stanleycai95
+     2 stphnlyd
+     2 svenharris
+     2 taeold
+     2 tiagohonorato
+     2 tim smith
+     2 timhunderwood
+     2 tobycheese
+     2 tolhassianipar
+     2 tomneep
+     2 tonywu1999
+     2 tonyyyyip
+     2 tsdlovell
+     2 tzinckgraf
+     2 wcgonzal
+     2 westurner
+     2 xpvpc
+     2 yogendrasoni
+     2 yonashub
+     2 zeitlinv
+     2 Ádám Lippai
+     1 1_x7
+     1 21CSM
+     1 3553x
+     1 5j9
+     1 A Brooks
+     1 A. Flaxman
+     1 AJ Dyka
+     1 AJ Pryor, Ph.D
+     1 ARF
+     1 Aadharsh-Acharya
+     1 Aadhi Manivannan
+     1 Aaron Barber
+     1 Aaron Rahman
+     1 Aaron Schumacher
+     1 Aaron Toth
+     1 Aashish KC
+     1 Abbie Popa
+     1 AbdealiJK
+     1 AbdulMAbdi
+     1 Abhijit Deo
+     1 Abhiraj Hinge
+     1 Abhishek R
+     1 Abo7atm
+     1 Acanthostega
+     1 Ada Draginda
+     1 Adam Bowden
+     1 Adam Chainz
+     1 Adam Gleave
+     1 Adam Kim
+     1 Adam Klaum
+     1 Adam Klimont
+     1 Adam Mróz
+     1 Adam Ormondroyd
+     1 Adam Smith
+     1 Adam Spannbauer
+     1 Adam W Bagaskarta
+     1 AdamShamlian
+     1 Aditya Agarwal
+     1 Aditya Anulekh
+     1 Adrian
+     1 Adrian D'Alessandro
+     1 Adrian Liaw
+     1 Adrien Emery
+     1 Adrien RUAULT
+     1 Agustín Herranz
+     1 Ahmad Mustafa Anis
+     1 Aidos Kanapyanov
+     1 Aivengoe
+     1 Ajitesh Singh
+     1 Akash Tandon
+     1 Akbar Septriyan
+     1 Akos Furton
+     1 Akshat Jain
+     1 Akshay Babbar
+     1 Alan Du
+     1 Alan Hogue
+     1 Alan Yee
+     1 Alastair James
+     1 Alastair Porter
+     1 Alejandro Giacometti
+     1 Alejandro Hall
+     1 Alejandro Hohmann
+     1 Aleksa Radojicic
+     1 Aleksandr Drozd
+     1 Alessandro Amici
+     1 Alessandro Bisiani
+     1 Alex B
+     1 Alex Itkes
+     1 Alex Lubbock
+     1 Alex Malins
+     1 Alex Marchenko
+     1 Alex Povel
+     1 Alex Radu
+     1 Alex Strick van Linschoten
+     1 Alex Thorne
+     1 Alex Vig
+     1 Alex-Blade
+     1 AlexTereshenkov
+     1 Alexander Gorodetsky
+     1 Alexander Hendorf
+     1 Alexander Hess
+     1 Alexander Lenail
+     1 Alexander Nordin
+     1 Alexander Regueiro
+     1 Alexander Seiler
+     1 Alexandra Sciocchetti
+     1 Alexandre Batisse
+     1 Alexandre Decan
+     1 Alexey Györi
+     1 Alfredo Granja
+     1 Ali Asgar
+     1 Alibi
+     1 Allen Riddell
+     1 AllenDowney
+     1 Allison Browne
+     1 Alok Singhal
+     1 Alvaro Aleman
+     1 Alyssa Fu Ward
+     1 Aman Thakral
+     1 Amanda Dsouza
+     1 Amay Patel
+     1 Amim Knabben
+     1 Amith KK
+     1 Amol
+     1 Amol Agrawal
+     1 Amol K
+     1 Amol Kahat
+     1 Amy Graham
+     1 Andras Deak
+     1 Andrea Bedini
+     1 Andreas Buhr
+     1 Andreas H.
+     1 Andreas Klostermann
+     1 Andreas Költringer
+     1 Andreas Schwab
+     1 Andrei Batomunkuev
+     1 Andres Algaba
+     1 Andrew Bui
+     1 Andrew Eckart
+     1 Andrew Fiore-Gartland
+     1 Andrew Gaspari
+     1 Andrew Gross
+     1 Andrew Kittredge
+     1 Andrew McPherson
+     1 Andrew Munch
+     1 Andrew Schonfeld
+     1 Andrew Shumanskiy
+     1 Andrew Spott
+     1 Andrew 亮
+     1 András Novoszáth
+     1 André Jonasson
+     1 Andy Craze
+     1 Andy Grigg
+     1 Andy Li
+     1 Aneta Kahleová
+     1 Angela Seo
+     1 AnglinaBhambra
+     1 Anh Le
+     1 Aniket Patil
+     1 Aniket uttam
+     1 Anil Kumar Pallekonda
+     1 Anirudh Hegde
+     1 Anjali Singh
+     1 Ankit Dhankhar
+     1 Ankush Dua
+     1 Anshoo Rajput
+     1 Anthony O'Brien
+     1 Antoine Pitrou
+     1 Antoine Viscardi
+     1 Anton Lodder
+     1 Antonio Andraues Jr
+     1 Antonio Fonseca
+     1 Antonio Gutierrez
+     1 Antonio Linde
+     1 Antonio Molina
+     1 Antonio Ossa-Guerra
+     1 Antonio Quinonez
+     1 Antony Evmorfopoulos
+     1 Antony Lee
+     1 Antti Kaihola
+     1 Anudeep Tubati
+     1 Anushka Bishnoi
+     1 Arash Rouhani
+     1 Arco Bast
+     1 Ari Sosnovsky
+     1 Arjun Sharma
+     1 Arkadeep Adhikari
+     1 ArnaudChanoine
+     1 Arno Veenstra
+     1 Artur Barseghyan
+     1 Arun12121
+     1 Arunim Samudra
+     1 Arushi Sharma
+     1 Arya Sarkar
+     1 Asadullah Naeem
+     1 Ashish Singal
+     1 Ashkan
+     1 Ashlan Parker
+     1 Ashwani
+     1 Ashwin Prakash Nalwade
+     1 Ashwini Chaudhary
+     1 Austin Au-Yeung
+     1 Austin Burnett
+     1 Austin Hackett
+     1 Avi Kelman
+     1 Ayappan
+     1 AyowoleT
+     1 Ayushman Kumar
+     1 Azeez Oluwafemi
+     1 B. J. Potter
+     1 Bailey Lissington
+     1 Barry Fitzgerald
+     1 Bart
+     1 Bart Aelterman
+     1 Bart Broere
+     1 Bas Nijholt
+     1 Bastiaan
+     1 Bayle Shanks
+     1 Bear
+     1 Becky Sweger
+     1 Ben
+     1 Ben Auffarth
+     1 Ben Forbes
+     1 Ben James
+     1 Ben Mangold
+     1 Ben Nelson
+     1 Ben North
+     1 Ben Wozniak
+     1 Benedikt Heidrich
+     1 Beni Bienz
+     1 Benjamin Fischer
+     1 Benjamin Gross
+     1 Benjamin Grove
+     1 Benjamin M. Gross
+     1 Benoit Paquet
+     1 Benoit Pointet
+     1 Berkay
+     1 Bernardo Gameiro
+     1 Bernhard Thiel
+     1 Bernhard Wagner
+     1 Bhavesh Poddar
+     1 Bhavesh Rajendra Patil
+     1 Bibek Jha
+     1 BielStela
+     1 Bijay Regmi
+     1 Bill Blum
+     1 Bill Chambers
+     1 Bishwas
+     1 Bjorn Arneson
+     1 Blair
+     1 Bob Baxley
+     1 Bobin Mathew
+     1 Bogdan Pilyavets
+     1 Boris Lau
+     1 BorisVerk
+     1 Bradley Dice
+     1 Brandon Rhodes
+     1 Brayan Alexander Muñoz B
+     1 BrenBarn
+     1 Brendan Sullivan
+     1 Brendan Wilby
+     1 Brett Randall
+     1 Brett Rosen
+     1 Brian
+     1 Brian Choi
+     1 Brian Granger
+     1 Brian J. McGuirk
+     1 Brian Jacobowski
+     1 Brian McFee
+     1 Brian Quistorff
+     1 Brian Strand
+     1 Brian Tu
+     1 Bruno Almeida
+     1 Bruno Costa
+     1 Bruno P. Kinoshita
+     1 Bryan Racic
+     1 Bryant Moscon
+     1 Bryce Guinta
+     1 Byron Boulton
+     1 C John Klehm
+     1 C.A.M. Gerlach
+     1 CCXXXI
+     1 Caleb
+     1 Caleb Braun
+     1 Carl Johan
+     1 Carlos Eduardo Moreira dos Santos
+     1 Carlos García Márquez
+     1 CarlosGDCJ
+     1 Carsten van Weelden
+     1 Carter Green
+     1 Cecilia
+     1 Cesar H
+     1 Charles Blackmon-Luca
+     1 Charlie Clark
+     1 Chase Albert
+     1 Chathura Widanage
+     1 Chau Hoang
+     1 Chetan0402
+     1 Chinmay Rane
+     1 Chitrank Dixit
+     1 Chris Barnes
+     1 Chris Burr
+     1 Chris Carini
+     1 Chris Carroux
+     1 Chris Catalfo
+     1 Chris Filo Gorgolewski
+     1 Chris Gilmer
+     1 Chris Ham
+     1 Chris Kerr
+     1 Chris M
+     1 Chris Moradi
+     1 Chris Mulligan
+     1 Chris Roberts
+     1 Chris Roth
+     1 ChrisRobo
+     1 Christian Berendt
+     1 Christian Geier
+     1 Christian Haege
+     1 Christian Perez
+     1 Christian Prinoth
+     1 Christian Stade-Schuldt
+     1 Christoph Deil
+     1 Christoph Moehl
+     1 Christoph Paulik
+     1 Christopher Hadley
+     1 Christopher Scanlin
+     1 Christopher Yeh
+     1 Chu Qing Hao
+     1 Cihan Ceyhan
+     1 Clark-W
+     1 ClaudiaSilver
+     1 Clearfield Christopher
+     1 Clemens Tolboom
+     1 Clément Robert
+     1 Cody Piersall
+     1 Coelhudo
+     1 Colin
+     1 Connor Charles
+     1 Constantine Glen Evans
+     1 Corentin Girard
+     1 Cornelius Riemenschneider
+     1 Corralien
+     1 Corrie Bartelheimer
+     1 Coulton Theuer
+     1 Crystal Gong
+     1 CuylenE
+     1 D.S. McNeil
+     1 DG
+     1 Da Cheezy Mobsta
+     1 Da Wang
+     1 DaCoEx
+     1 Damian Barabonkov
+     1 Damini Satya
+     1 Damodara Puddu
+     1 Dan Davison
+     1 Dan Dixey
+     1 Dan Hendry
+     1 Dan King
+     1 Dan Moore
+     1 DanBasson
+     1 Daniel Chen
+     1 Daniel Coll
+     1 Daniel Frank
+     1 Daniel Garrido
+     1 Daniel Hähnke
+     1 Daniel Julius Lasiman
+     1 Daniel Luis Costa
+     1 Daniel Ni
+     1 Daniel Sakuma
+     1 Daniel Shapiro
+     1 Daniel Siladji
+     1 Daniel Weindl
+     1 Daniele Nicolodi
+     1 Danilo Horta
+     1 Darcy Meyer
+     1 Darin Plutchok
+     1 Dav Clark
+     1 Dave Lewis
+     1 David
+     1 David BROCHART
+     1 David C Hall
+     1 David Fischer
+     1 David Hirschfeld
+     1 David Hoese
+     1 David Hoffman
+     1 David John Gagne
+     1 David Jung
+     1 David Kelly
+     1 David Kwong
+     1 David Liu
+     1 David Lutz
+     1 David Polo
+     1 David Rasch
+     1 David Read
+     1 David Rouquet
+     1 David Rudel
+     1 David S
+     1 David Samuel
+     1 David Sanders
+     1 David Schlachter
+     1 David Seifert
+     1 David Toneian
+     1 David Wales
+     1 David Wolever
+     1 David Zaslavsky
+     1 David-dmh
+     1 DavidKleindienst
+     1 DavidRosen
+     1 Dean
+     1 Dean Langsam
+     1 Deepak George
+     1 Deepak Pandey
+     1 Deepak Sirohiwal
+     1 Deepan Das
+     1 Denis Belavin
+     1 Denis Sapozhnikov
+     1 Dennis Bakhuis
+     1 Dennis Kamau
+     1 Derzan Chiang
+     1 Dharani Akurathi
+     1 Dhruv B Shetty
+     1 Dhruv Samdani
+     1 Diego Fernandez
+     1 Diego Torres
+     1 Dimitri
+     1 Dimitris Spathis
+     1 Dina
+     1 Dirk Ulbricht
+     1 Dmitriy Perepelkin
+     1 Dmitry L
+     1 Dmitry Savostyanov
+     1 Dmitry Shemetov
+     1 Dmytro Litvinov
+     1 Do Young Kim
+     1 Dobatymo
+     1 Dody Suria Wijaya
+     1 Dominik Berger
+     1 Dominik Kutra
+     1 Dominik Stanczak
+     1 Dominique Garmier
+     1 Donald Curtis
+     1 DorAmram
+     1 Doris Lee
+     1 Dorothy Kabarozi
+     1 Dorozhko Anton
+     1 Doug Coleman
+     1 Douglas Lohmann
+     1 Dr. Jan-Philip Gehrcke
+     1 Dr. Leo
+     1 DrIrv
+     1 Drew Fustin
+     1 Drew Heenan
+     1 Drew Levitt
+     1 Drewrey Lupton
+     1 Dukastlik
+     1 Dustin K
+     1 Dylan Dmitri Gray
+     1 Dylan Percy
+     1 Dāgs Grīnbergs
+     1 ETF
+     1 Eduardo Blancas Reyes
+     1 Eduardo Chaves
+     1 Edward Huang
+     1 Efkan S. Goktepe
+     1 Ege Özgüroğlu
+     1 Ehsan Azarnasab
+     1 Ekaterina Borovikova
+     1 Elahe Sharifi
+     1 Eli Dourado
+     1 Eli Schwartz
+     1 Eli Treuherz
+     1 Eliza Mae Saret
+     1 Elliot Marsden
+     1 Elliot S
+     1 Elvis Lim
+     1 Emily Taylor
+     1 Emma Carballal Haire
+     1 Ennemoser Christoph
+     1 Enrico Rotundo
+     1 Eric Boxer
+     1 Eric Brassell
+     1 Eric Goddard
+     1 Eric Groszman
+     1 Eric Han
+     1 Eric Kisslinger
+     1 Eric O. LEBIGOT (EOL)
+     1 Eric Stein
+     1 Eric Wong
+     1 Erik Fredriksen
+     1 Erik Hasse
+     1 Erik M. Bray
+     1 Erik Nilsson
+     1 Erik Welch
+     1 Erkam Uyanik
+     1 Ernesto Freitas
+     1 EternalLearner42
+     1 Ethan Chen
+     1 Eunseop Jeong
+     1 Evan
+     1 Evan D
+     1 Evan Kanter
+     1 Evan Livelo
+     1 Evgeny Naumov
+     1 Ewout ter Hoeven
+     1 Eyal Trabelsi
+     1 Eyden Villanueva
+     1 Ezra Brauner
+     1 FAN-GOD
+     1 Fabien Aulaire
+     1 Fabrizio Pollastri
+     1 Facundo Batista
+     1 Fakabbir Amin
+     1 Fei Phoon
+     1 Felipe Maion
+     1 Felix Claessen
+     1 Felix Lawrence
+     1 Felix Marczinowski
+     1 Fer Sar
+     1 Fernando Margueirat
+     1 Fernando Perez
+     1 Filip Ter
+     1 Finlay Maguire
+     1 Flavien Lambert
+     1 Florian Hofstetter
+     1 Florian Müller
+     1 Florian Rathgeber
+     1 Florian Roscheck
+     1 Florian Wilhelm
+     1 Floris Kint
+     1 Flynn Owen
+     1 ForTimeBeing
+     1 Forbidden Donut
+     1 FragLegs
+     1 Francesc Alted
+     1 Francesco Andreuzzi
+     1 Francesco Brundu
+     1 Francesco Romandini
+     1 Francesco Truzzi
+     1 Francis
+     1 Francisco Alfaro
+     1 Francois Dion
+     1 Frank Cleary
+     1 Frank Hoang
+     1 Frank Pinter
+     1 Frans Larsson
+     1 Frans van Dunné
+     1 Fredrik Erlandsson
+     1 Fumito Hamamura
+     1 GYHHAHA
+     1 GYvan
+     1 Gabe F
+     1 Gabe Fernando
+     1 Gabi Davar
+     1 Gabriel Araujo
+     1 Gabriel Kabbe
+     1 Gabriel Moreira
+     1 Gabriel Reid
+     1 Gabriel de Maeztu
+     1 Gadea Autric
+     1 Gaibo Zhang
+     1 Gaurav Chauhan
+     1 Gaétan Ramet
+     1 Gaëtan de Menten
+     1 Geoffrey B. Eisenbarth
+     1 Georeth Chow
+     1 George Kuan
+     1 George Munyoro
+     1 Georgi Baychev
+     1 Gerard Jorgensen
+     1 German Gomez-Herrero
+     1 Gianpaolo Macario
+     1 Gilberto Olimpio
+     1 Gim Seng
+     1 GiuPassarelli
+     1 Giulio Pepe
+     1 Giuseppe Romagnuolo
+     1 Godwill Agbehonou
+     1 Gordon Blackadder
+     1 Gordon Chen
+     1 Gosuke Shibahara
+     1 Gouthaman Balaraman
+     1 Graham R. Jeffries
+     1 Graham Taylor
+     1 Graham Wetzler
+     1 Greg Gandenberger
+     1 Greg Reda
+     1 Greg Siano
+     1 Greg Williams
+     1 Gregory Livschitz
+     1 Griffin Ansel
+     1 Grigorios Giannakopoulos
+     1 Grzegorz Konefał
+     1 Guilherme Leite
+     1 Guilherme Salomé
+     1 Guilherme Samora
+     1 Guillem Borrell
+     1 Guillem Sánchez
+     1 Gulnur Baimukhambetova
+     1 Gustavo Vargas
+     1 Gyeongjae Choi
+     1 HHest
+     1 Hadi Abdi Khojasteh
+     1 HagaiHargil
+     1 Haleemur Ali
+     1 Hamidreza Sanaee
+     1 Hamish Pitkeathly
+     1 Hanmin Qin
+     1 Hao Wu
+     1 Harsh Nisar
+     1 Harsh Sharma
+     1 Harshit Patni
+     1 Harutaka Kawamura
+     1 Hassan Shamim
+     1 Hatem Nassrat
+     1 Hatim Zahid
+     1 Hedeer El Showk
+     1 Heidi
+     1 Hendrik Makait
+     1 Henry
+     1 Henry Hammond
+     1 HighYoda
+     1 Hiki
+     1 Himanshu Wagh
+     1 Hiroaki Ogasawara
+     1 Hissashi Rocha
+     1 Hood Chatham
+     1 Horace
+     1 Hsiaoming Yang
+     1 Huan Li
+     1 Hugo Herter
+     1 Huize Wang
+     1 Hung-Yi Wu
+     1 Hussain Tamboli
+     1 Ian Alexander Joiner
+     1 Ian Dunn
+     1 Ian Hoegen
+     1 Ian Ozsvald
+     1 Ibrahim Sharaf ElDen
+     1 Idil Ismiguzel
+     1 Ignacio Vergara Kausel
+     1 Ignasi Fosch
+     1 Igor Conrado Alves de Lima
+     1 Igor Filippov
+     1 Igor Gotlibovych
+     1 Igor Shelvinskyi
+     1 Illia Polosukhin
+     1 Imanflow
+     1 Inevitable-Marzipan
+     1 Isaac Schwabacher
+     1 Isaac Slavitt
+     1 Issam
+     1 IsvenC
+     1 Itayazolay
+     1 Iulius Curt
+     1 Iva Koevska
+     1 Iva Laginja
+     1 Iva Miholic
+     1 Ivan Bessarabov
+     1 Ivan Ng
+     1 Iván Vallés Pérez
+     1 JElfner
+     1 JJ
+     1 Jaca
+     1 Jack Greisman
+     1 Jack McIvor
+     1 Jackie Leng
+     1 Jacob Austin
+     1 Jacob Buckheit
+     1 Jacob Bundgaard
+     1 Jacob Deppen
+     1 Jacob Howard
+     1 Jacob Skwirsk
+     1 Jacob Stevens-Haas
+     1 Jacob Wasserman
+     1 Jake Tae
+     1 Jake Torcasso
+     1 Jake Vanderplas
+     1 Jakob Jarmar
+     1 Jakub Nowacki
+     1 James Bourbeau
+     1 James Casbon
+     1 James Freeman
+     1 James Goppert
+     1 James Hiebert
+     1 James Holcombe
+     1 James McBride
+     1 James Moro
+     1 James Santucci
+     1 James Spencer
+     1 James Winegar
+     1 Jan F-F
+     1 Jan Müller
+     1 Jan Novotný
+     1 Jan Pipek
+     1 Jan Wagner
+     1 Jan Werkmann
+     1 Jan Červenka
+     1 JanVHII
+     1 Jane Chen
+     1 JasmandeepKaur
+     1 Jason Bandlow
+     1 Jason Bian
+     1 Jason Jia
+     1 Jason Kiley
+     1 Jason Swails
+     1 Jason Wolosonovich
+     1 Jasper J.F. van den Bosch
+     1 Javad
+     1 Jay Alammar
+     1 Jay Bourque
+     1 Jay Offerdahl
+     1 Jayanth Katuri
+     1 Jean-Baptiste Schiratti
+     1 Jean-Francois Zinque
+     1 Jeanderson Barros Candido
+     1 Jeff Blackburne
+     1 Jeff Hammerbacher
+     1 Jeff Hernandez
+     1 Jeffrey Starr
+     1 Jenn Olsen
+     1 Jeongmin Yu
+     1 Jered Dominguez-Trujillo
+     1 Jeremy Wagner
+     1 Jernej Makovsek
+     1 Jerod Estapa
+     1 Jesse Pardue
+     1 Jiezheng2018
+     1 Jihwan Song
+     1 Jihyung Moon
+     1 Jim
+     1 Jim Jeon
+     1 JimStearns206
+     1 Jimmy Woo
+     1 Jinli Xiao
+     1 Jinyang Zhou
+     1 Jirka Borovec
+     1 Joachim Wagner
+     1 Joan Martin Miralles
+     1 Joanna Ge
+     1 Joao Pedro Berno Zanutto
+     1 Joaquim L. Viegas
+     1 Jody Klymak
+     1 Joel Gibson
+     1 Joel Whittier
+     1 Joerg Rittinger
+     1 Joeun Park
+     1 Johan Kåhrström
+     1 Johan von Forstner
+     1 John
+     1 John Bencina
+     1 John Bodley
+     1 John C
+     1 John Cant
+     1 John Evans
+     1 John Fremlin
+     1 John Paton
+     1 John S Bogaardt
+     1 John Tucker
+     1 John Ward
+     1 Johnny
+     1 Johnny Chiu
+     1 Johnny Gill
+     1 Johnny Metz
+     1 Jon Bramley
+     1 Jon Crall
+     1 Jon Haitz Legarreta Gorroño
+     1 Jon Thielen
+     1 Jon Wiggins
+     1 Jona Sassenhagen
+     1 Jonas
+     1 Jonas Buyl
+     1 Jonas Hoersch
+     1 Jonas Laursen
+     1 Jonas Schulze
+     1 Jonathan J. Helmus
+     1 Jonathan Larkin
+     1 Jonathan Whitmore
+     1 Jonathan de Bruin
+     1 Jonathon Vandezande
+     1 Jongwony
+     1 Joost Kranendonk
+     1 Jop Vermeer
+     1 Jordan Erenrich
+     1 Jorge López Fueyo
+     1 Joschka zur Jacobsmühlen
+     1 Jose
+     1 Jose Manuel Martí
+     1 Jose Ortiz
+     1 Jose Rivera-Rubio
+     1 Joseph Gulian
+     1 JosephParampathu
+     1 JosephWagner
+     1 Josh Howes
+     1 Josh Levy-Kramer
+     1 Josh Owen
+     1 Josh Temple
+     1 Joshua Bradt
+     1 Joshua Klein
+     1 Joshua Leahy
+     1 Joshua Shew
+     1 Joshua Smith
+     1 Joshua Storck
+     1 José F. R. Fonseca
+     1 Jovixe
+     1 Joy Bhalla
+     1 Jozef Brandys
+     1 João Andrade
+     1 João Meirelles
+     1 João Veiga
+     1 Julia Aoun
+     1 Julian Badillo
+     1 Julian Fleischer
+     1 Julian Kuhlmann
+     1 Julian Ortiz
+     1 Julian Santander
+     1 Julian Schnitzler
+     1 Julian Steger
+     1 Julien Marrec
+     1 Julien Palard
+     1 Julien de la Bruère-T
+     1 Julio Martinez
+     1 Jun
+     1 Jun Kim
+     1 Juraj Niznan
+     1 Justin Berka
+     1 Justin C Johnson
+     1 Justin Cole
+     1 Justin Lin
+     1 Justin Sexton
+     1 Justin Solinsky
+     1 Justin Tyson
+     1 Justus Magin
+     1 Jörg Döpfert
+     1 Júlio
+     1 KD-dev-lab
+     1 Kabiir Krishna
+     1 Kacawi
+     1 Kadatatlu Kishore
+     1 Kai Mühlbauer
+     1 Kai Priester
+     1 Kamal Kamalaldin
+     1 Kamil Sindi
+     1 Kane
+     1 Kapil Patel
+     1 Karel De Brabandere
+     1 Karl Dunkle Werner
+     1 Karmanya Aggarwal
+     1 Karrie Kehoe
+     1 Karthik Velayutham
+     1 Kassandra Keeton
+     1 Katharina Tielking, MD
+     1 Katherine Surta
+     1 Katherine Younglove
+     1 Katie Atkinson
+     1 Kaushal Rohit
+     1 Kavya9986
+     1 Kazuki Igeta
+     1 Kazuto Haruguchi
+     1 Keiron Pizzey
+     1 Keith Kraus
+     1 Keith Webber
+     1 Kelly
+     1 Keming Zhang
+     1 Ken Mankoff
+     1 Ken Van Haren
+     1 Kendall
+     1 Kenil
+     1 Kenneth
+     1 Ketan
+     1 Ketu Patel
+     1 Kevin Klein
+     1 Kevin Kuhl
+     1 Kevin Markham
+     1 Kevin Nguyen
+     1 Kian S
+     1 Kiley Hewitt
+     1 Kilian Lieret
+     1 Kimi Li
+     1 Kinza-Raza
+     1 Kirk Hansen
+     1 Kisekka David
+     1 Kodi Arfer
+     1 Koen Roelofs
+     1 Koki Nishihara
+     1 Konjeti Maruthi
+     1 Koushik
+     1 Kristian Holsheimer
+     1 Krzysztof Chomski
+     1 Ksenia Bobrova
+     1 Ksenia Gueletina
+     1 Kumar Shivam
+     1 Kurtis Kerstein
+     1 Kyle
+     1 Kyle Boone
+     1 Kyle Hausmann
+     1 Kyle Kosic
+     1 Kyle McCahill
+     1 LM
+     1 Laksh Arora
+     1 Lakshya A Agrawal
+     1 Larry Ren
+     1 Larry Wong
+     1 Laurens Geffert
+     1 Laurent Gautier
+     1 Leon Yin
+     1 Leonardo Freua
+     1 Levi Matus
+     1 Lewis Cowles
+     1 Li Jin
+     1 Liam Marshall
+     1 Liang-Chi Hsieh
+     1 Lightyears
+     1 Linda Chen
+     1 Line Pedersen
+     1 Linxiao Francis Cong
+     1 Linxiao Wu
+     1 LiuSeeker
+     1 Liudmila
+     1 Lorenzo Bolla
+     1 Lorenzo Cestaro
+     1 Lorenzo Stella
+     1 Louis-Émile Robitaille
+     1 Loïc Séguin-C
+     1 Luca Donini
+     1 Lucas Damo
+     1 Lucas Holtz
+     1 LucasG0
+     1 Lucca Delchiaro Costabile
+     1 Luciano Viola
+     1 Lucky Sivagurunathan
+     1 Lucy Jiménez
+     1 Ludovico Russo
+     1 Luis Ortiz
+     1 Luiz Gustavo
+     1 Lukasz
+     1 Luke Lee
+     1 Luke Shepard
+     1 LunarLanding
+     1 Luo Yicheng
+     1 MKhalusova
+     1 Maarten Rietbergen
+     1 Mac
+     1 Maciej J
+     1 Maciej Kos
+     1 Madhuri Palanivelu
+     1 Madhuri Patil
+     1 Maggie Liu
+     1 Magnus Jöud
+     1 Mahdi Ben Jelloul
+     1 MainHanzo
+     1 Makarov Andrey
+     1 Malcolm
+     1 Malgorzata Turzanska
+     1 Manu NALEPA
+     1 Manuel Leonhardt
+     1 Manuel Riel
+     1 Maoyuan Liu
+     1 Marat Kopytjuk
+     1 Marcel Bittar
+     1 Marcel Gerber
+     1 Marco Farrugia
+     1 Maria Stazherova
+     1 Maria Telenczuk
+     1 Maria del Mar Bibiloni
+     1 Maria-Alexandra Ilie
+     1 Mariam-ke
+     1 Marie K
+     1 Marielle
+     1 Mario Corchero
+     1 Marius Potgieter
+     1 Mark Harfouche
+     1 Mark Mandel
+     1 Mark O'Leary
+     1 Mark Roth
+     1 Mark Sikora
+     1 Mark Woodbridge
+     1 Marlene Silva Marchena
+     1 MarsGuy
+     1 Martin
+     1 Martin Babka
+     1 Martin Bjeldbak Madsen
+     1 Martin Dengler
+     1 Martin Grigorov
+     1 Martin Jones
+     1 Martin Journois
+     1 Martin Šícho
+     1 Marty Rudolf
+     1 Marvin
+     1 Marvin John Walter
+     1 MarvinGravert
+     1 Marvzinc
+     1 María Marino
+     1 MasonGallo
+     1 MatanCohe
+     1 Mateusz Piotrowski
+     1 Matheus Cardoso
+     1 Matheus Cerqueira
+     1 Matheus Pedroni
+     1 Mathew Topper
+     1 Mathias Hauser
+     1 Matilda M
+     1 Matt Bark
+     1 Matt Boggess
+     1 Matt Cooper
+     1 Matt Gambogi
+     1 Matt Savoie
+     1 Matt Suggit
+     1 Matt Williams
+     1 MattRijk
+     1 Matteo Felici
+     1 Matteo Raso
+     1 Matthew Davis
+     1 Matthew Kirk
+     1 Matthew Lurie
+     1 Matthew Rocklin
+     1 Matthew Tan
+     1 Matthias Kuhn
+     1 Matthieu Brucher
+     1 Matti Airas
+     1 Mauro Silberberg
+     1 Max Chang
+     1 Max Grender-Jones
+     1 Max Halford
+     1 Max Kanter
+     1 Max Kovalovs
+     1 Max Mikhaylov
+     1 MaxU
+     1 Maxim Kupfer
+     1 Maxwell Bileschi
+     1 Mayank Asthana
+     1 Mayank Bisht
+     1 Mayank Chaudhary
+     1 Mayur
+     1 Megan Thong
+     1 Mehdi Mohammadi
+     1 Mehgarg
+     1 Mehmet Ali "Mali" Akmanalp
+     1 Melissa Weber Mendonça
+     1 Metehan Kutlu
+     1 Mia Reimer
+     1 Micah Smith
+     1 Michael Davis
+     1 Michael Droettboom
+     1 Michael E. Gruen
+     1 Michael Felt
+     1 Michael Gasvoda
+     1 Michael Harris
+     1 Michael Lamparski
+     1 Michael Milton
+     1 Michael Mior
+     1 Michael P. Moran
+     1 Michael Röttger
+     1 Michael Sarrazin
+     1 Michael Scherer
+     1 Michael Selik
+     1 Michael Silverstein
+     1 Michael Terry
+     1 Michael Waskom
+     1 Michael-J-Ward
+     1 Michel de Ruiter
+     1 Michelangelo D'Agostino
+     1 Michiel Stock
+     1 Mickaël Schoentgen
+     1 Mie~~~
+     1 Miguel Marques
+     1 Miguel Sánchez de León Peque
+     1 Mike Cramblett
+     1 Mike Graham
+     1 Mike McCarty
+     1 Mike Roberts
+     1 Mike Taves
+     1 Mikolaj Chwalisz
+     1 Miles Cranmer
+     1 Milo
+     1 Miloni Atal
+     1 Min ho Kim
+     1 MinGyo Jung
+     1 MinRK
+     1 MirijaH
+     1 Misha Veldhoen
+     1 Mitlasóczki Bence
+     1 Miyuu
+     1 Mohamad Hussein Rkein
+     1 Mohammad Ahmadi
+     1 Mohit Anand
+     1 Monica
+     1 Montana Low
+     1 Moonsoo Kim
+     1 Morgan Stuart
+     1 Morgan243
+     1 Morisa Manzella
+     1 Moritz Münst
+     1 Moritz Schreiber
+     1 Moritz Schubert
+     1 Morten Canth Hels
+     1 Morton Fox
+     1 Moussa Taifi
+     1 Muhammad Haseeb Tariq
+     1 Mukul Ashwath Ram
+     1 MusTheDataGuy
+     1 Mykola Golubyev
+     1 NJOKU OKECHUKWU VALENTINE
+     1 NNLNR
+     1 Nagesh Kumar C
+     1 Nanda H Krishna
+     1 Nate Armstrong
+     1 Nate George
+     1 Nathan Ford
+     1 Nathan Sanders
+     1 Nathan Typanski
+     1 Nathanael
+     1 Naveen Agrawal
+     1 Navreet Gill
+     1 Neal McBurnett
+     1 Nehil Jain
+     1 Nicholas Stahl
+     1 Nicholas Ursa
+     1 Nick Burns
+     1 Nick Foti
+     1 Nick Garvey
+     1 Nick Newman
+     1 Nick Stahl
+     1 Nicklaus Roach
+     1 Nico Schlömer
+     1 Nicolas Camenisch
+     1 Nicolas Hug
+     1 Nidhi Zare
+     1 Nigel Markey
+     1 Nikita Sobolev
+     1 Niklas Weber
+     1 Nikolay Boev
+     1 Nikoleta Glynatsi
+     1 Nikos Karagiannakis
+     1 Nils Müller-Wendt
+     1 Nima Sarang
+     1 Nipun Sadvilkar
+     1 Nis Martensen
+     1 Nishu Choudhary
+     1 Noah Spies
+     1 Nolan Nichols
+     1 Noora Husseini
+     1 Noy Hanan
+     1 Noémi Éltető
+     1 Number42
+     1 ObliviousParadigm
+     1 Oktay Sabak
+     1 Olga Lyashevska
+     1 Olivier Bilodeau
+     1 Olivier Cavadenti
+     1 Olivier Harris
+     1 Omar Afifi
+     1 Omer Ozarslan
+     1 Omkar Yadav
+     1 Onno Eberhard
+     1 Osman
+     1 Owen Lamont
+     1 Ozan Öğreden
+     1 P-Tillmann
+     1 P. Talley
+     1 PApostol
+     1 Pablo
+     1 Pablo Ambrosio
+     1 Pan Deng / Zora
+     1 Parfait G
+     1 Partho
+     1 Pastafarianist
+     1 Patrick
+     1 Patrick Luo
+     1 Patrick O'Melveny
+     1 Patrick Park
+     1 Patrik Hlobil
+     1 Paul Lilley
+     1 Paul Mannino
+     1 Paul Masurel
+     1 Paul McCarthy
+     1 Paul Mestemaker
+     1 Paul Pellissier
+     1 Paul Reiners
+     1 Paul Siegel
+     1 Paul Uhlenbruck
+     1 Paul van Mulbregt
+     1 Paula
+     1 Paulo Roberto de Oliveira Castro
+     1 Paulo S. Costa
+     1 Pawel Kranzberg
+     1 Pax
+     1 Pearcekieser
+     1 Pedro Larroy
+     1 Peng Yu
+     1 Pepe Flores
+     1 Pete Huang
+     1 Peter Bull
+     1 Peter Csizsek
+     1 Peter Hawkins
+     1 Peter Hoffmann
+     1 Peter Liu
+     1 Peter Quackenbush
+     1 Peter Steinbach
+     1 Peter Tillmann
+     1 Peter Waller
+     1 Peter Yanovich
+     1 Petio Petrov
+     1 Petr Baev
+     1 Petra Chong
+     1 Phil Kirlin
+     1 Phil Ngo
+     1 Phil Ruffwind
+     1 Phil Schaf
+     1 Philip Cerles
+     1 Philip Gura
+     1 Philippe Ombredanne
+     1 Pierre-Yves Bourguignon
+     1 Pierrot
+     1 Piotr Kasprzyk
+     1 Pooja Subramaniam
+     1 Prabha Arivalagan
+     1 Prabhjot Singh
+     1 Pradyumna Reddy Chinthala
+     1 Pranav Saibhushan Ravuri
+     1 Pranav Suri
+     1 Pranav. P. A
+     1 Pranjal Bhardwaj
+     1 Prashant Anand
+     1 Pratap Vardhan
+     1 PrathumP
+     1 Pratik Patel
+     1 Priyanka Ojha
+     1 Puneeth K
+     1 Puneetha Pai
+     1 Purna Chandra Mansingh
+     1 Purushothaman Srikanth
+     1 Pyry Kovanen
+     1 QP Hou
+     1 README Bot
+     1 Radoslaw Lemiec
+     1 Rafael Jaimes III
+     1 Rafif
+     1 Rahul Gaikwad
+     1 Rahul Siloniya
+     1 RahulHP
+     1 Rajasvi Vinayak
+     1 Rajat
+     1 Rajhans Jadhao
+     1 Rajiv Bharadwaj
+     1 Rakshit Naidu
+     1 Ralph Bean
+     1 Randolf Scholz
+     1 Rebecca Chen
+     1 Red
+     1 Redonnet Louis
+     1 Reinert Huseby Karlsen
+     1 Renato Cotrim Maciel
+     1 RenzoBertocchi
+     1 Reshama Shaikh
+     1 Reza (Milad) Maanijou
+     1 Rhys Parry
+     1 Ricardo Alanis
+     1 Ricardo Martins
+     1 Richard Eames
+     1 Richard Lewis
+     1 Ridhwan Luthra
+     1 Rishipuri
+     1 Rob Forgione
+     1 Rob deCarvalho
+     1 Robbert-jan 't Hoen
+     1 Robbie Palmer
+     1 Robert
+     1 Robert Gibboni
+     1 Robert Kern
+     1 Robert Luce
+     1 Robert Voyer
+     1 Robin Kiplang'at
+     1 Robin Raymond
+     1 RobinFiveWords
+     1 Robsdedude
+     1 Rodolfo Fernandez
+     1 Roger Murray
+     1 Rohan Pandit
+     1 Rohan Sharma
+     1 Rohan Sirohia
+     1 Rohit Gupta
+     1 Rok Mihevc
+     1 RomainSa
+     1 Roman Imankulov
+     1 Roman Khomenko
+     1 Ronalido
+     1 Ror
+     1 Ross Petchler
+     1 Roy Keyes
+     1 Roymprog
+     1 Ruizhe Deng
+     1 Rupert Thompson
+     1 Russell Smith
+     1 Ryan Gibson
+     1 Ryan Grout
+     1 Ryan Hendrickson
+     1 Ryan Joyce
+     1 Rylie Wei
+     1 Ryszard T. Kaleta
+     1 Rémy Léone
+     1 Rüdiger Busche
+     1 S Mono
+     1 S.TAKENO
+     1 SAI SRAVAN MEDICHERLA
+     1 SELEE
+     1 SEUNG HOON, SHIN
+     1 SFuller4
+     1 SHUBH CHATTERJEE
+     1 SOUMYADIP MAL
+     1 Saiwing Yeung
+     1 Sakar Panta
+     1 Sam Cohen
+     1 Sam Ezebunandu
+     1 Sam James
+     1 Sam Purkis
+     1 Sam Zhang
+     1 Sami Salonen
+     1 Samir Musali
+     1 Samira-g-js
+     1 Samuel Denny
+     1 Samuel GIFFARD
+     1 Samyak Jain
+     1 Sandeep Pathak
+     1 Sander
+     1 Sandrine Pataut
+     1 Sandro Casagrande
+     1 Sandu Ursu
+     1 Sanghee Kim
+     1 Sangmin Park
+     1 Sanjiv Lobo
+     1 Santosh Kumar
+     1 Sara Bonati
+     1 Sarah Bird
+     1 Sarah Masud
+     1 SarahJessica
+     1 Sarma Tangirala
+     1 Sasidhar Kasturi
+     1 Satrio H Wicaksono
+     1 Saumitra Shahapure
+     1 Sayed Qaiser Ali
+     1 Schaer, Jacob C
+     1 Scott Cole
+     1 Scott Gigante
+     1 Scott McAllister
+     1 SdgJlbl
+     1 Sean Chan
+     1 Sean M. Law
+     1 Sebastiaan Vermeulen
+     1 Sebastian Gsänger
+     1 Sebastian Pölsterl
+     1 Sebastian Roll
+     1 Sebastian Rubbert
+     1 Senthil Palanisami
+     1 Sereger13
+     1 Sergei Chipiga
+     1 Sergei Ivko
+     1 Sergey
+     1 Sergey Kopylov
+     1 Sergey Zakharov
+     1 Sergio Pascual
+     1 Shaghayegh
+     1 Shannon Wang
+     1 Sharad Vijalapuram
+     1 Sharon Woo
+     1 Shashank Agarwal
+     1 Shashank Shet
+     1 Shashwat Sharma
+     1 Shengpu Tang
+     1 Sheogorath27
+     1 Shiko Wamwea
+     1 ShilpaSugan
+     1 Shirish Kadam
+     1 Shrey Dixit
+     1 Shubham Chaudhary
+     1 Shubham Mehra
+     1 Shubhankar Lohani
+     1 Shudong Yang
+     1 Shyam Saladi
+     1 Shyamala Venkatakrishnan
+     1 SiYoungOh
+     1 Siddhesh Poyarekar
+     1 Sidharthan Nair
+     1 Simar Bassi
+     1 Simon Boehm
+     1 Simon Brugman
+     1 Simon Høxbro Hansen
+     1 Simon Legner
+     1 Simon Riddell
+     1 SimonBaron
+     1 Sioned Baker
+     1 Siu Kwan Lam
+     1 Sixuan (Cherie) Wu
+     1 Snorf Yang
+     1 Soham Tiwari
+     1 Solomon Song
+     1 Song Wenhao
+     1 Sortofamudkip
+     1 Soumik Dutta
+     1 Soumya
+     1 Sourav kumar
+     1 Souris Ash
+     1 Sowrov Talukder
+     1 Soyoun Kim
+     1 Spencer Carrucciu
+     1 Spencer Clark
+     1 SplashDance
+     1 StEmGeo
+     1 Stan West
+     1 Stefan Mejlgaard
+     1 Stefan van der Walt
+     1 Stefano Alberto Russo
+     1 Stefano Miccoli
+     1 Steffen Schmitz
+     1 Sten
+     1 Stepfen Shawn
+     1 Stephan Heßelmann
+     1 Stephen
+     1 Stephen Cowley
+     1 Stephen Kappel
+     1 StephenVoland
+     1 Sterling Paramore
+     1 Steve Baker
+     1 Steve Choi
+     1 Steve Dower
+     1 Steven
+     1 Steven Bamford
+     1 Steven Cutting
+     1 Steven Pitman
+     1 Stewart Henderson
+     1 Stuart Berg
+     1 Stéphane Guillou
+     1 Sudarshan Konge
+     1 Sudeep
+     1 SultanOrazbayev
+     1 Sumin Byeon
+     1 SurajH1
+     1 Suyash Gupta
+     1 Swanand01
+     1 Sylvain Corlay
+     1 Sylvia
+     1 Szymon Bednarek
+     1 Sébastien de Menten
+     1 Sören
+     1 T N
+     1 Taavi Burns
+     1 Tabea Kossen
+     1 Takuya N
+     1 Talitha Pumar
+     1 Tamas Nagy
+     1 Tambe Tabitha Achere
+     1 Tang Heyi
+     1 Tanmay Daripa
+     1 Tara Adiseshan
+     1 Tarun Raghunandan Kaushik
+     1 Telt
+     1 Terry Santegoeds
+     1 TheDerivator
+     1 Thiago Cordeiro da Fonseca
+     1 Thiago Gariani
+     1 Thiago Serafim
+     1 ThibTrip
+     1 Thomas
+     1 Thomas Guillet
+     1 Thomas H
+     1 Thomas Heavey
+     1 Thomas Kastl
+     1 Thomas Kluiters
+     1 Thomas Lazarus
+     1 Thomas Lentali
+     1 Thomas Vranken
+     1 Thomas Wiecki
+     1 ThomasBlauthQC
+     1 Thoralf Gutierrez
+     1 Thouis (Ray) Jones
+     1 Thrasibule
+     1 Tianye Song
+     1 Tilen Kusterle
+     1 Tim Akinbo
+     1 Tim Cera
+     1 Tim Gates
+     1 Tim Hopper
+     1 Tim Loderhose
+     1 Tim McFarland
+     1 Tim McNamara
+     1 Tim Tran
+     1 Tim Yang
+     1 Tirth Jain
+     1 Tobias Gustafsson
+     1 Tobias McNulty
+     1 Tolker-KU
+     1 Tom
+     1 Tom Farnbauer
+     1 Tom Neep
+     1 Tomas Pavlik
+     1 Tomasz Kluczkowski
+     1 Tomasz Sakrejda
+     1 Tommy Lynch
+     1 Tomoki Nakagawa
+     1 Tomáš Chvátal
+     1 Tong Shen
+     1 Tony Hirst
+     1 Toro
+     1 Toroi
+     1 TraverseTowner
+     1 Travis
+     1 Travis Gibbs
+     1 Triple0
+     1 Tsvika Shapira
+     1 Tuan
+     1 Tuhin Mahmud
+     1 Tulio Casagrande
+     1 Ty Mick
+     1 Unprocessable
+     1 Upkar Lidder
+     1 Utkarsh Upadhyay
+     1 Uwe
+     1 Uwe Hoffmann
+     1 V.I. Wood
+     1 Vadym Barda
+     1 Valentin Iovene
+     1 Valentin Oliver Loftsson
+     1 VanMyHu
+     1 Vandana Iyer
+     1 Varun Sharma
+     1 Vibhu Agarwal
+     1 Victor Chaves
+     1 Victor Maryama
+     1 Victor Momodu
+     1 Victor Salgado
+     1 Vijay Sai Mutyala
+     1 Vijayant
+     1 Vikas Pandey
+     1 Vikram Shirgur
+     1 Vikramaditya Gaonkar
+     1 Vikramjeet Das
+     1 Ville Aikas
+     1 Vince W
+     1 Vincent Davis
+     1 Vinicius Akira
+     1 Vinicius Akira Imaizumi
+     1 Vinita Parasrampuria
+     1 Vinícius Figueiredo
+     1 Vipin Kumar
+     1 Vishwak Srinivasan
+     1 Vishwam Pandya
+     1 Vitória Helena
+     1 Vivek
+     1 Vivek Thazhathattil
+     1 Vladimir Berkutov
+     1 Vladimir Fokow
+     1 Vladimir Podolskiy
+     1 Vladislav
+     1 VomV
+     1 Vyas Ramasubramani
+     1 Vyom Jain
+     1 Víctor Moron Tejero
+     1 W.R
+     1 Weiwen Gu
+     1 Wesley Boelrijk
+     1 Weston Renoud
+     1 Wieland Hoffmann
+     1 Wiktor Tomczak
+     1 Wil Tan
+     1 Will Lachance
+     1 Will Thompson
+     1 William
+     1 William Bradley
+     1 William Hogman
+     1 Wilson Lin
+     1 Wilson Xing
+     1 Winand
+     1 Wojciech Sadowski
+     1 Wolf Behrenhoff
+     1 Wolfgang F. Riedl
+     1 Wouter De Coster
+     1 XF
+     1 Xiang Zhang
+     1 Xnot
+     1 YG-Riku
+     1 YaOzI
+     1 Yadunandan
+     1 Yao-Ching Huang
+     1 Yash Gupta
+     1 Yash Shukla
+     1 Yasin A
+     1 Yee Mey
+     1 Yeojin Kim
+     1 Yeongseon Choe
+     1 Yeshwanth N
+     1 Yevgeniy Grechka
+     1 Yi Liu
+     1 Yiannis Hadjicharalambous
+     1 Yikun Jiang
+     1 Yitzhak Andrade
+     1 Yoann Goular
+     1 Yoong Kang Lim
+     1 Yosuke KOBAYASHI
+     1 Yosuke Nakabayashi
+     1 Young Joong Kim
+     1 Yu Wang
+     1 Yuan Tang (Terry)
+     1 Yulia
+     1 Yuliya Dovzhenko
+     1 Yulong Yang
+     1 Yury Bayda
+     1 Yusei Tahara
+     1 Yuval Langer
+     1 Yuya Takashina
+     1 Yvan Cywan
+     1 Zach Breger
+     1 Zach Brookler
+     1 Zach Dwiel
+     1 Zach Rait
+     1 Zachary Lawrence
+     1 Zachary Moon
+     1 Zaky Bilfagih
+     1 Zbyszek Królikowski
+     1 Zeb Nicholls
+     1 Zeke
+     1 Zemux1613
+     1 Zhengfei Wang
+     1 ZhihuiChen0903
+     1 Zhiyi Wu
+     1 Ziad Kermadi
+     1 Zihao Zhao
+     1 aaron315
+     1 abaldenko
+     1 abarber4gh
+     1 aberres
+     1 abokey1
+     1 acorbe
+     1 adrian-stepien
+     1 adrienpacifico
+     1 aeltanawy
+     1 aernlund
+     1 agustín méndez
+     1 ahmad2901
+     1 ailchau
+     1 aiudirog
+     1 ajenkins-cargometrics
+     1 akielbowicz
+     1 alex argunov
+     1 alex arsenovic
+     1 alexander135
+     1 alexandercbooth
+     1 alexhtn
+     1 alexondor
+     1 ali sayyah
+     1 alinde1
+     1 amotzop
+     1 amphy
+     1 amuta
+     1 andhikayusup
+     1 andjhall
+     1 andyjessen
+     1 andymaheshw
+     1 anilbey
+     1 ankostis
+     1 anonmouse1
+     1 anton-d
+     1 aptalca
+     1 aram-cinnamon
+     1 araraonline
+     1 arnaudlegout
+     1 asharma13524
+     1 assafam
+     1 atbd
+     1 ateki
+     1 august-tengland
+     1 austinc
+     1 avelineg
+     1 aviolov
+     1 azuranski
+     1 azure-pipelines[bot]
+     1 babakkeyvani
+     1 bastewart
+     1 benabel
+     1 benarthur91
+     1 bernie gray
+     1 bertrandhaut
+     1 bganglia
+     1 bherwerth
+     1 bicarlsen
+     1 biddwan09
+     1 bk521234
+     1 bkandel
+     1 bmagnusson
+     1 bmu
+     1 boombard
+     1 bpraggastis
+     1 bravech
+     1 brendandrury
+     1 brian-pantano
+     1 bubblingoak
+     1 bunardsheng
+     1 c123w
+     1 calhockemeyer
+     1 calvinsomething
+     1 carbonleakage
+     1 carla-alves-24
+     1 carlosdanielcsantos
+     1 casadipietra
+     1 catmar22
+     1 cclauss
+     1 cdknox
+     1 chaimdemulder
+     1 chappers
+     1 charlie0389
+     1 chebee7i
+     1 chernrick
+     1 chinggg
+     1 chinskiy
+     1 chris-caballero
+     1 chromy
+     1 claudiobertoldi
+     1 claws
+     1 cmazzullo
+     1 cmmck
+     1 cnguyen-03
+     1 cobalt
+     1 code-review-doctor
+     1 color455nm
+     1 conmai
+     1 cr3
+     1 cruzzoe
+     1 cxl923cc
+     1 cyrusmaher
+     1 d10genes
+     1 dajcs
+     1 dalgarno
+     1 danchev
+     1 daniel
+     1 daniellebrown
+     1 darke-spirits
+     1 david
+     1 david-cortes
+     1 davidjameshumphreys
+     1 davidleon123
+     1 davidmvalente
+     1 davidovitch
+     1 daydreamt
+     1 denisrei
+     1 dequadras
+     1 derestle-htwg
+     1 dgram0
+     1 dhuettenmoser
+     1 dickreuter
+     1 dicristina
+     1 dimitra-karadima
+     1 dkamm
+     1 dmanikowski-reef
+     1 doosik_bae
+     1 dospix
+     1 dr-leo
+     1 dubourg
+     1 dylanchase
+     1 ebardie
+     1 economy
+     1 eduardo naufel schettino
+     1 edwardkong
+     1 ejnens
+     1 el-g-1
+     1 elmonsomiat
+     1 elrubio
+     1 ember91
+     1 emilydolson
+     1 endenis
+     1 engstrom
+     1 enisnazif
+     1 epizzigoni
+     1 est271
+     1 estasney
+     1 euri10
+     1 evangelineliu
+     1 evensure
+     1 ezcitron
+     1 fabriziop
+     1 faic
+     1 fding253
+     1 fengyqf
+     1 fivemok
+     1 fl4p
+     1 fleimgruber
+     1 floydsoft
+     1 flying-sheep
+     1 fotino21
+     1 fractionalhare
+     1 francisco souza
+     1 funnycrab
+     1 gabicca
+     1 gabrielclow
+     1 ganego
+     1 gaotian98
+     1 garanews
+     1 geetha-rangaswamaiah
+     1 gesoos
+     1 gfkang
+     1 gfr
+     1 giplessis
+     1 gmaiwald
+     1 gmollard
+     1 goldenbull
+     1 gunghub
+     1 guygoldberg
+     1 gwrome
+     1 h-vishal
+     1 hack-c
+     1 haison
+     1 hannah-c
+     1 harisbal
+     1 harshul1610
+     1 hasnain2808
+     1 hcontrast
+     1 heckeop
+     1 helger
+     1 henriqueribeiro
+     1 herrfz
+     1 hesham.shabana@hotmail.com
+     1 hhuuggoo
+     1 hironow
+     1 hongshaoyang
+     1 hshimizu77
+     1 hsperr
+     1 huashuai
+     1 hunterowens
+     1 iamsimha
+     1 ian
+     1 iansheng
+     1 icanhazcodeplz
+     1 ignamv
+     1 igorfassen
+     1 iulia
+     1 ivanovmg
+     1 jackieleng
+     1 jalazbe
+     1 jalbritt
+     1 jamesoliverh
+     1 jamin-aws-ospo
+     1 jaredsnyder
+     1 jayfoad
+     1 jazzmuesli
+     1 jebob
+     1 jeps-journal
+     1 jfcorbett
+     1 jfoo
+     1 jh-wu
+     1 jjames34
+     1 jkovacevic
+     1 jnclt
+     1 jniznan
+     1 joaoavf
+     1 joders
+     1 joelsonoda
+     1 jojomdt
+     1 jordi-crespo
+     1 josham
+     1 jotasi
+     1 joy-rosie
+     1 jsexauer
+     1 juan huguet
+     1 juliandwain
+     1 juliansmidek
+     1 juricast
+     1 justinchan23
+     1 jxb4892
+     1 jyuv
+     1 kaustuv deolal
+     1 kdiether
+     1 kevx82
+     1 kgmuzungu
+     1 kiwirob
+     1 kjford
+     1 klonuo
+     1 knuu
+     1 kotrfa
+     1 kouya takahashi
+     1 kpflugshaupt
+     1 krey
+     1 ksanghai
+     1 l736x
+     1 larvian
+     1 leandermaben
+     1 leerssej
+     1 lenolib
+     1 leo
+     1 lexy-lixinyu
+     1 lgautier
+     1 lia2710
+     1 link2xt
+     1 lloydkirk
+     1 lmcindewar
+     1 lodagro
+     1 lpkirwin
+     1 lrepiton
+     1 ltartaro
+     1 ltoniazzi
+     1 lucas
+     1 lucyleeow
+     1 lusolorz
+     1 m-ganko
+     1 mKlepsch
+     1 maheshbapatu
+     1 majiang
+     1 manikbhandari
+     1 manoj_koneni
+     1 manu
+     1 manuels
+     1 marcosrullan
+     1 mariana-LJ
+     1 marydmit
+     1 masterpiga
+     1 mattB1989
+     1 matthiashuschle
+     1 mattkeanny
+     1 mattrijk
+     1 mavismonica
+     1 maxalbert
+     1 maximilianaccardo
+     1 maximilianr
+     1 maxwasserman
+     1 mazayo
+     1 mck619
+     1 mcocdawc
+     1 mdeboc
+     1 mecopur
+     1 mgilbert
+     1 mglasder
+     1 mhb143
+     1 miguelmorin
+     1 mikebailey
+     1 milosz-martynow
+     1 minat-hub
+     1 miquelcamprodon
+     1 mjlove12
+     1 moaraccounts
+     1 monicaBee
+     1 monosans
+     1 morotti
+     1 morrme
+     1 mpuels
+     1 mrastgoo
+     1 mschmohl
+     1 mvirts
+     1 mwaskom
+     1 na2
+     1 naveenkaushik2504
+     1 nguevara
+     1 nicolab100
+     1 nikitaved
+     1 nileracecrew
+     1 nmartensen
+     1 nprad
+     1 nuffe
+     1 ojdo
+     1 omtinez
+     1 orereta
+     1 ossdev07
+     1 paihu
+     1 pajachiet
+     1 pallav-fdsi
+     1 pan Jacek
+     1 pandas-docs-bot
+     1 parchd-1
+     1 parkdj1
+     1 paul-mannino
+     1 pbreach
+     1 peadarcoyle
+     1 penelopeysm
+     1 peterpanmj
+     1 philipphanemann
+     1 phofl
+     1 pijucha
+     1 pizzathief
+     1 pmaxey83
+     1 potap75
+     1 pqzx
+     1 pratyushsharan
+     1 pvanhauw
+     1 quantumalaviya
+     1 raanasn
+     1 rafael
+     1 rafarui
+     1 raguiar2
+     1 raisadz
+     1 ramvikrams
+     1 ranarag
+     1 raph-m
+     1 ratijas
+     1 rdk1024
+     1 readyready15728
+     1 rhstanton
+     1 ribonoous
+     1 rmihael
+     1 rmunjal29
+     1 robertzk
+     1 rocabrera
+     1 roib20
+     1 rs2
+     1 ruiann
+     1 rxxg
+     1 s-weigand
+     1 sage
+     1 sagungrp
+     1 sakkemo
+     1 salem3358
+     1 salomondush
+     1 saloni30
+     1 samghelms
+     1 samilAyoub
+     1 sanderland
+     1 sangarshanan
+     1 sanguineturtle
+     1 sardonick
+     1 sarvaSanjay
+     1 saskakarsi
+     1 saucoide
+     1 saurav2608
+     1 scriptomation
+     1 sdementen
+     1 seales
+     1 segatrade
+     1 sfoo
+     1 shaido987
+     1 shiersansi
+     1 shourya5
+     1 sideeye
+     1 silentquasar
+     1 silviaovo
+     1 skwbc
+     1 sm1899
+     1 smartswdeveloper
+     1 someben
+     1 soumilbaldota
+     1 ssortman
+     1 stahlous
+     1 stas-sl
+     1 staticdev
+     1 stefansimik
+     1 stellalin7
+     1 step4me
+     1 stephenwlin
+     1 steveayers124
+     1 steveya
+     1 stijnvanhoey
+     1 strawberry beach sandals
+     1 sudhir mohanraj
+     1 sukriti1
+     1 suoniq
+     1 surveymedia.ca
+     1 svaksha
+     1 sweisss
+     1 syutbai
+     1 tadashigaki
+     1 tadeja
+     1 tamuhey
+     1 testvinder
+     1 thauck
+     1 the-nose-knows
+     1 theOehrly
+     1 theandygross
+     1 theidexisted
+     1 themrmax
+     1 theodorju
+     1 thordisstella
+     1 thuske
+     1 timcera
+     1 tkmz-n
+     1 tlaytongoogle
+     1 tmoschou
+     1 tntmatthews
+     1 tnwei
+     1 tom-alcorn
+     1 tomascassidy
+     1 tomrod
+     1 torext
+     1 tpanza
+     1 trevorkask
+     1 tsinggggg
+     1 tworec
+     1 tyuyoshi
+     1 v-mcoutinho
+     1 venkateshdatta1993
+     1 verakai
+     1 vernetya
+     1 vineethraj510
+     1 vivikelapoutre
+     1 vkk800
+     1 vladu
+     1 vmdhhh
+     1 vrserpa
+     1 vytas
+     1 wandersoncferreira
+     1 wany-oh
+     1 watercrossing
+     1 wavedatalab
+     1 wertha
+     1 willbowditch
+     1 willie3838
+     1 willweil
+     1 winlu
+     1 xgdgsc
+     1 xinrong-databricks
+     1 xzmeng
+     1 yehia67
+     1 yelite
+     1 yhaque1213
+     1 ym-pett
+     1 yqyqyq-W
+     1 yrhooke
+     1 ysau
+     1 yun
+     1 ywpark1
+     1 z3c0
+     1 zachcp
+     1 zaki-indra
+     1 zertrin
+     1 zhanghui
+     1 zhangjinjie
+     1 zhangxiaoxing
+     1 zhezherun
+     1 znmean
+     1 zys5945
+     1 zzgao
+     1 Åsmund Hjulstad
+     1 Øystein S. Haaland
+     1 Štěpán Műller
+     1 颜发才(Yan Facai)
+     1 김동현 (Daniel Donghyun Kim)
+
+Debian packaging
+   642 Rebecca N. Palmer
+   323 Yaroslav Halchenko
+    49 Mo Zhou
+    38 Andreas Tille
+    26 Graham Inggs
+     7 Jochen Sprickerhof
+     4 Dmitry Shachnev
+     2 Julian Taylor
+     2 Yaroslav O Halchenko
+     1 Diane Trout
+     1 Ole Streicher
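
The "count name" lines above follow the `git shortlog -ns` convention:
commit count first, then the author, with equal counts listed
alphabetically. As a minimal sketch (not necessarily the tooling actually
used for this file, and with a placeholder checkout path), such a list can
be regenerated from a local git clone as below; upstream's
doc/sphinxext/announce.py does something similar for release notes:

    #!/usr/bin/python3
    # Sketch: rebuild a "count name" contributor list from a git checkout.
    # The repository path is a hypothetical example.
    import subprocess
    from pathlib import Path

    repo = Path("pandas")  # placeholder: path to a pandas git clone

    # `git shortlog -ns HEAD` prints one "count<TAB>name" line per author,
    # sorted by commit count, highest first.
    out = subprocess.run(
        ["git", "-C", str(repo), "shortlog", "-ns", "HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout

    for line in out.splitlines():
        count, name = line.strip().split("\t", 1)
        print(f"{int(count):6d} {name}")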
diff --git a/control b/control
new file mode 100644 (file)
index 0000000..11e0eb4
--- /dev/null
+++ b/control
@@ -0,0 +1,234 @@
+Source: pandas
+Maintainer: Debian Science Team <debian-science-maintainers@lists.alioth.debian.org>
+Uploaders: Yaroslav Halchenko <debian@onerussian.com>,
+           Michael Hanke <michael.hanke@gmail.com>,
+           Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Section: python
+Priority: optional
+Build-Depends: debhelper-compat (= 13),
+               dh-python,
+               faketime,
+               locales-all,
+               python3-all-dev,
+               cython3 (>= 3.0.5~),
+# python3-blosc is not actually used (blosc is used through python3-tables instead)
+               python3-bottleneck (>= 1.3.6~) <!nocheck> <!nodoc>,
+               python3-bs4 (>= 4.11.2~) <!nocheck> <!nodoc>,
+               python3-dask (>= 2023.2.0~) <!nocheck> <!nodoc>,
+               python3-dateutil,
+               python3-fsspec (>= 2022.11.0~) <!nocheck> <!nodoc>,
+               python3-html5lib (>= 1.1~) <!nocheck> <!nodoc>,
+               python3-hypothesis (>= 6.46.1~) <!nocheck> <!nodoc>,
+               python3-jinja2 (>= 3.1.2~) <!nocheck> <!nodoc>,
+               python3-lxml (>= 4.9.2~) <!nocheck> <!nodoc>,
+#               python3-matplotlib (>= 3.6.3~) [!hurd-any !ia64 !loong64 !m68k !powerpc !sh4 !sparc64 !x32] <!nocheck> <!nodoc>,
+# numba has a history of bugs on non-x86, e.g. #1033907
+# architectures here are the ones on which to treat numba-related failures as RC - see also debian/tests/control
+# temporarily disabled as numba is not in testing #1033907               python3-numba (>= 0.56.4~) [amd64 i386 ppc64el] <!nocheck> <!nodoc>,
+               python3-numexpr (>= 2.8.4~) [!hurd-any] <!nocheck> <!nodoc>,
+               python3-numpy (>= 1:1.23.2~),
+               python3-odf (>= 1.4.1~) <!nocheck> <!nodoc>,
+               python3-openpyxl (>= 3.1.0~) <!nocheck> <!nodoc>,
+# doesn't seem to work in this test environment               python3-psycopg2 (>= 2.9.6~) <!nocheck> <!nodoc>,
+               python3-py <!nocheck> <!nodoc>,
+# doesn't seem to work in this test environment               python3-pymysql (>= 1.0.2~) <!nocheck> <!nodoc>,
+               python3-pyqt5 (>= 5.15.9~) <!nocheck> <!nodoc>,
+# in -indep to reduce circular dependencies python3-pyreadstat,
+               python3-pytest (>= 7.3.2~) <!nocheck> <!nodoc>,
+               python3-pytest-asyncio (>= 0.17~) <!nocheck> <!nodoc>,
+               python3-pytest-forked <!nocheck> <!nodoc>,
+               python3-pytest-localserver <!nocheck> <!nodoc>,
+               python3-pytest-xdist (>= 2.2.0~) <!nocheck> <!nodoc>,
+               python3-pytestqt (>= 4.2.0~) <!nocheck> <!nodoc>,
+# we don't have python3-pyxlsb
+               python3-scipy (>= 1.10.0~),
+               python3-setuptools (>= 51~),
+               python3-sqlalchemy (>= 2.0.0~) [!hurd-any !hppa !powerpc !sparc64] <!nocheck> <!nodoc>,
+# python3-tables is now little-endian only, and also unavailable on some ports
+               python3-tables (>= 3.8.0~) [!s390x !hppa !powerpc !ppc64 !sparc64 !hurd-any !alpha] <!nocheck> <!nodoc>,
+               python3-tabulate (>= 0.9.0~) <!nocheck> <!nodoc>,
+               python3-tk <!nocheck> <!nodoc>,
+               python3-tz (>= 2022.7~) <!nocheck> <!nodoc>,
+               python3-xlrd (>= 2.0.1~) <!nocheck> <!nodoc>,
+               python3-xlsxwriter (>= 3.0.5~) <!nocheck> <!nodoc>,
+               python3-versioneer,
+               python3-zstandard (>= 0.19.0~) <!nocheck> <!nodoc>,
+               sphinx-common,
+# for tests/examples that use old-style timezone names
+               tzdata-legacy <!nocheck> <!nodoc>,
+               xvfb <!nocheck>,
+               xauth <!nocheck>,
+               xsel <!nocheck>
+Build-Depends-Indep: python3-sphinx <!nodoc>,
+                     python3-sphinx-copybutton <!nodoc>,
+                     python3-sphinx-design <!nodoc>,
+                     python3-pydata-sphinx-theme (>= 0.14~) <!nodoc>,
+                     python3-ipykernel <!nodoc>,
+# we don't have this version                     python3-notebook (>= 7.0.6~) <!nodoc>,
+                     python3-notebook <!nodoc>,
+                     python3-nbconvert (>= 7.11.0~) <!nodoc>,
+                     python3-nbsphinx <!nodoc>,
+                     python3-numpydoc <!nodoc>,
+                     python3-pygments <!nodoc>,
+                     ipython3 (>= 7.11.1~) <!nodoc>,
+                     jdupes <!nodoc>,
+# for style.ipynb
+                     pandoc <!nodoc>,
+# for intersphinx inventories
+                     python3-doc <!nodoc>,
+                     python-numpy-doc <!nodoc>,
+                     python-scipy-doc <!nodoc>,
+                     python-matplotlib-doc <!nodoc>,
+                     python-statsmodels-doc <!nodoc>,
+# these reduce the number of exception messages in documentation examples,
+# so they may be temporarily removed if they are broken or to break bootstrap cycles
+# not in Debian (not to be confused with python3-arrow) python3-pyarrow <!nodoc>,
+#fails with KeyError 'state', possibly nbconvert bug 1731                     python3-ipywidgets <!nodoc>,
+                     python3-rpy2 <!nodoc>,
+#break depends cycle                     python3-seaborn <!nodoc>,
+# these are also used in some tests, but depend on pandas, so they are in -indep to avoid
+# circular-dependency BD-Uninstallable when the arch:all build finishes before some of the arch:any builds start
+#                     python3-pyreadstat (>= 1.2.0~) <!nocheck> <!nodoc>,
+#                     python3-statsmodels <!nocheck> <!nodoc>,
+#                     python3-xarray (>= 2022.12.0~) <!nocheck> <!nodoc>,
+                     python3-matplotlib <!nocheck> <!nodoc>,
+Standards-Version: 4.7.0
+Vcs-Browser: https://salsa.debian.org/science-team/pandas
+Vcs-Git: https://salsa.debian.org/science-team/pandas.git
+Homepage: https://pandas.pydata.org/
+Rules-Requires-Root: no
+
+Package: python3-pandas
+Architecture: all
+Depends: ${misc:Depends},
+         ${python3:Depends},
+         python3-numpy,
+         python3-dateutil,
+         python3-pandas-lib (>= ${source:Upstream-Version}),
+# should maybe also have python3-pandas-lib (<< ${source:Upstream-Version}.0), but that leaves arch:all BD-Uninstallable if arch:amd64 builds first
+         tzdata
+Recommends: python3-scipy,
+            python3-matplotlib,
+            python3-tz,
+# for faster processing
+# see -lib for python3-numba
+            python3-bottleneck,
+            python3-numexpr,
+# for spreadsheet I/O
+            python3-odf,
+            python3-openpyxl,
+# for HTML table I/O
+            python3-bs4,
+            python3-html5lib,
+            python3-lxml,
+# for HDF5 I/O
+            python3-tables,
+# for styled output
+            python3-jinja2
+Suggests: python-pandas-doc,
+          python3-statsmodels
+Breaks:
+# 1.1 -> 1.3 API breaks, see #999415
+        python3-cfgrib (<= 0.9.9-1),
+        python3-joypy (<= 0.2.2-2),
+# 1.5 -> 2.1 API breaks, #1043240
+        cnvkit (<< 0.9.10~),
+        python3-altair (<< 5.0.1~),
+        python3-anndata (<= 0.8.0-4),
+        python3-biom-format (<< 2.1.15.2-3~),
+        python3-cooler (<< 0.9.3~),
+        python3-dask (<< 2023.12.1~),
+        python3-dials (<< 3.17.0~),
+        python3-dyda (<= 1.41.1-1.1),
+        python3-emperor (<< 1.0.3+ds-9~),
+        python3-esda (<= 2.5.1-1),
+        python3-feather-format (<< 0.3.1+dfsg1-8~),
+        python3-hypothesis (<< 6.83.1~),
+        python3-jsonpickle (<< 3.0.2+dfsg-1~),
+        python3-mirtop (<< 0.4.25-5~),
+        python3-nanoget (<< 1.19.3~),
+        python3-pauvre (<< 0.2.3-3~),
+        python3-pyani (<< 0.2.12-3~),
+        python3-pymatgen (<< 2024.1.27~),
+        python3-pyranges (<= 0.0.111+ds-6),
+        python3-seaborn (<< 0.13.0~),
+        python3-skbio (<< 0.5.9~),
+        python3-sklearn-pandas (<= 2.2.0-1.1),
+        python3-sunpy (<< 5.1.0-1~),
+# broken tests but probably not broken actual package        python3-tqdm (<= 4.64.1-1),
+# the affected python3-ulmo test is no longer run, but the underlying issue is not actually fixed
+        python3-ulmo (<= 0.8.8+dfsg1-2),
+        python3-upsetplot (<< 0.8.0-3~),
+        python3-xarray-sentinel (<< 0.9.5+ds-2~),
+        q2-cutadapt (<< 2023.7.0-1~),
+        q2-demux (<= 2023.9.1+dfsg-1),
+        q2-quality-control (<= 2022.11.1-2),
+        q2-taxa (<= 2023.9.0+dfsg-1),
+        q2-types (<= 2023.9.0-1),
+        q2templates (<= 2023.9.0+ds-1),
+# 2.1 -> 2.2 API breaks, #1069792
+        augur (<< 24.4.0-1~),
+        python3-influxdb (<< 5.3.2-1~),
+        python3-statsmodels (<< 0.14.2~),
+Description: data structures for "relational" or "labeled" data
+ pandas is a Python package providing fast, flexible, and expressive
+ data structures designed to make working with "relational" or
+ "labeled" data both easy and intuitive. It aims to be the fundamental
+ high-level building block for doing practical, real-world data
+ analysis in Python. pandas is well suited for many different kinds of
+ data:
+ .
+  - Tabular data with heterogeneously-typed columns, as in an SQL
+    table or Excel spreadsheet.
+  - Ordered and unordered (not necessarily fixed-frequency) time
+    series data.
+  - Arbitrary matrix data (homogeneously typed or heterogeneous) with
+    row and column labels.
+  - Any other form of observational / statistical data sets. The data
+    actually need not be labeled at all to be placed into a pandas
+    data structure.
+ .
+ This package contains the Python 3 version.
+
+Package: python-pandas-doc
+Architecture: all
+Section: doc
+Depends: ${misc:Depends},
+         ${sphinxdoc:Depends},
+         libjs-mathjax
+Suggests: python3-pandas
+Multi-Arch: foreign
+Description: data structures for "relational" or "labeled" data - documentation
+ pandas is a Python package providing fast, flexible, and expressive
+ data structures designed to make working with "relational" or
+ "labeled" data both easy and intuitive. It aims to be the fundamental
+ high-level building block for doing practical, real-world data
+ analysis in Python. pandas is well suited for many different kinds of
+ data:
+ .
+  - Tabular data with heterogeneously-typed columns, as in an SQL
+    table or Excel spreadsheet.
+  - Ordered and unordered (not necessarily fixed-frequency) time
+    series data.
+  - Arbitrary matrix data (homogeneously typed or heterogeneous) with
+    row and column labels.
+  - Any other form of observational / statistical data sets. The data
+    actually need not be labeled at all to be placed into a pandas
+    data structure.
+ .
+ This package contains the documentation.
+
+Package: python3-pandas-lib
+Architecture: any
+Multi-Arch: same
+Depends: ${misc:Depends},
+         ${shlibs:Depends},
+         ${python3:Depends},
+         python3-numpy
+# this is here to allow it to be arch-specific, to avoid numba bugs on other architectures
+Recommends: python3-numba [amd64]
+Description: low-level implementations and bindings for pandas
+ This is a low-level package for python3-pandas providing
+ architecture-dependent extensions.
+ .
+ Users should not need to install it directly.
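
Both the control file above and the machine-readable copyright file below
are deb822 documents: blank-line-separated stanzas of "Field: value" pairs,
with continuation lines indented by one space. A minimal sketch of
inspecting the control stanzas with the python3-debian library follows;
the file path is an assumption, and comment lines are stripped up front
because deb822 comment handling varies between parsers:

    #!/usr/bin/python3
    # Sketch: list binary packages and their Breaks entries from a Debian
    # control file, using python3-debian (debian.deb822).
    from io import StringIO
    from debian.deb822 import Deb822

    with open("debian/control") as f:  # assumed path
        # drop '#' comment lines before parsing
        text = "".join(line for line in f if not line.startswith("#"))

    for stanza in Deb822.iter_paragraphs(StringIO(text)):
        if "Package" not in stanza:  # skip the Source stanza
            continue
        print(stanza["Package"])
        # Breaks is a comma-separated relationship list, e.g.
        # "python3-cfgrib (<= 0.9.9-1), ..."
        for rel in stanza.get("Breaks", "").replace("\n", " ").split(","):
            rel = rel.strip()
            if rel:
                print("  breaks:", rel)

Run against the control file above, this would report the pinned Breaks
versions for python3-pandas and none for the other two packages.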
diff --git a/copyright b/copyright
new file mode 100644 (file)
index 0000000..e31fb98
--- /dev/null
+++ b/copyright
@@ -0,0 +1,746 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: pandas
+Upstream-Contact: pandas-dev@python.org
+Source: https://github.com/pandas-dev/pandas
+Files-Excluded: doc/source/user_guide/cookbook.rst
+                .gitignore
+                .gitattributes
+Comment: cookbook.rst is excluded because it contains around 100 Stack Overflow
+ snippets (CC-BY-SA, with possibly inadequate attribution); the other exclusions
+ avoid potential non-copyright breakage.  The generic pandas upstream copyright
+ in the * stanza is repeated for other files they modified (without tracking
+ exact years), because the format defines the initial * as applying only to
+ files not listed elsewhere.
+
+Files: *
+Copyright: 2008-2024 AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+Comment: Lumped together as the notices disagree on which holders cover which
+ years.  See contributors_list.txt for a full list of commit authors.  The
+ original source contains some CC-BY-SA snippets by Stack Overflow users
+ (https://stackoverflow.com/users/1114/jeremy-banks,
+ https://stackoverflow.com/users/387251/oliver,
+ https://stackoverflow.com/users/3297752/noah-motion,
+ https://stackoverflow.com/users/925671/bill,
+ https://stackoverflow.com/users/1082349/foobar,
+ https://stackoverflow.com/users/3089209/crantila,
+ https://stackoverflow.com/users/2375855/ojdo,
+ https://stackoverflow.com/users/487339/dsm,
+ https://stackoverflow.com/users/2677943/swenzel), but these may be too small
+ to be copyrightable, and the less trivial ones are patched out in this
+ package.
+
+Files: doc/sphinxext/announce.py
+Copyright: 2001-2017 Enthought, Inc. and SciPy Developers.
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+Comment: it is possible that other code was also taken from SciPy
+
+Files: pandas/_libs/include/pandas/portable.h
+Copyright: 2005-2020 Rich Felker and contributors (see LICENSES/MUSL_LICENSE for list)
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Expat and BSD-3
+Origin: musl src/ctype (partly)
+
+Files: pandas/_libs/include/pandas/skiplist.h
+Copyright: 2009, Raymond Hettinger
+           2016(?) Wes McKinney
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Expat and BSD-3
+Origin: http://code.activestate.com/recipes/576930/
+Comment: this is Cython code "inspired" by the original Python code by Raymond Hettinger
+
+Files: pandas/_libs/include/pandas/parser/tokenizer.h
+       pandas/_libs/src/parser/tokenizer.c
+Copyright: 2002 Michael Ringgaard
+           2011-2012 Warren Weckesser
+           2001-2012 Python Software Foundation and Python contributors
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Python and BSD-3
+Origin: csv (Python standard library), github.com/WarrenWeckesser/textreader
+
+Files: pandas/_libs/include/pandas/vendored/klib/*
+Copyright: 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
+License: Expat
+
+Files: pandas/_libs/include/pandas/vendored/ujson/*
+       pandas/_libs/src/vendored/ujson/*
+Copyright: 1988-1993 The Regents of the University of California
+           1994 Sun Microsystems, Inc.
+           2005-2007 Nick Galbreath
+           2014 Electronic Arts Inc.
+           2011-2013 ESN Social Software AB and Jonas Tarnstrom
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3 and Expat
+Origin: ultrajson, modp_ascii and TCL
+
+Files: pandas/_libs/tslibs/parsing.pyx
+Copyright: 2003-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
+           2012-2014 - Tomi Pieviläinen <tomi.pievilainen@iki.fi>
+           2014-2016 - Yaron de Leeuw <me@jarondl.net>
+           2015-2017 - Paul Ganssle <paul@ganssle.io>
+           2015-2017 - dateutil contributors (see AUTHORS file)
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+Origin: dateutil (partly)
+
+Files: pandas/_libs/include/pandas/vendored/numpy/*
+       pandas/_libs/src/vendored/numpy/*
+Copyright: 2005-?, NumPy Developers
+License: BSD-3
+Origin: numpy
+Comment: the included license says 2005-2023, but this was updated without changing the actual code (https://github.com/pandas-dev/pandas/pull/54743) and the files say derived from Numpy 1.7 (2013)
+
+Files: pandas/_libs/window/aggregations.pyx
+       pandas/tests/window/test_rolling.py
+Copyright: 2010-2019 Keith Goodman
+           2019 Bottleneck Developers
+           2010-2012 Archipel Asset Management AB
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+Origin: bottleneck (partly)
+Comment: the original was BSD-2, but combining BSD-2 and BSD-3 material yields BSD-3 overall
+
+Files: pandas/compat/*
+Copyright: 2010-2013 Benjamin Peterson
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Expat and BSD-3
+Origin: six
+
+Files: pandas/core/accessor.py
+Copyright: 2014-2018 xarray developers
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Apache-2.0 and BSD-3
+Origin: xarray (partly)
+
+Files: pandas/io/clipboard/*
+Copyright: 2010-2017 Albert Sweigart and Pyperclip contributors
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+Origin: Pyperclip
+
+Files: pandas/io/sas/sas7bdat.py
+Copyright: 2015-2019 Jared Hobbs
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+Origin: https://bitbucket.org/jaredhobbs/sas7bdat
+License: Expat
+
+Files: pandas/tests/io/data/html/banklist.html
+       pandas/tests/io/data/csv/banklist.csv
+       pandas/tests/io/data/html/spam.html
+Copyright: None; by Federal Deposit Insurance Corporation and US Department of Agriculture
+License: public-domain
+
+Files: pandas/tests/io/data/html/wikipedia_states.html
+Copyright: 2002-2014 Wikipedia contributors (full list: https://en.wikipedia.org/w/index.php?title=List_of_U.S._states_and_territories_by_area&offset=20140630&action=history)
+License: CC-BY-SA-3.0
+
+Files: pandas/tests/io/data/spss/*
+Copyright: 2019 Hadley Wickham; RStudio; and Evan Miller
+License: Expat
+Origin: https://haven.tidyverse.org
+
+Files: pandas/util/version/*
+Copyright: 2021 Donald Stufft and individual contributors
+License: Apache-2.0 or BSD-3
+Origin: https://github.com/pypa/packaging/blob/main/packaging/_structures.py
+
+Files: scripts/no_bool_in_generic.py
+Copyright: 2017 Anthony Sottile
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: Expat and BSD-3
+Origin: pyupgrade (partly)
+
+Files: setup.py
+Copyright: 2009-2012, Brian Granger, Min Ragan-Kelley (from pyzmq)
+           2004 Infrae (from lxml)
+           20xx-20xx AQR Capital Management, LLC, Lambda Foundry, Inc., PyData Development Team, Open source contributors
+License: BSD-3
+
+Files: debian/*
+Copyright: 2011-2018, Yaroslav Halchenko <debian@onerussian.com>
+           2019-2024, Debian Science Team
+License: BSD-3
+
+License: BSD-2
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+ .
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+ .
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: BSD-3
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ .
+  * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+     copyright notice, this list of conditions and the following
+     disclaimer in the documentation and/or other materials provided
+     with the distribution.
+  * Neither the name of the copyright holder nor the names of any
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: Expat
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ .
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+License: Apache-2.0
+ See /usr/share/common-licenses/Apache-2.0
+ (xarray notice)
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+     http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ (version/__init__.py notice)
+ This file is dual licensed under the terms of the Apache License, Version
+ 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ for complete details.
+
+License: Python
+ PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+ --------------------------------------------
+ .
+ 1. This LICENSE AGREEMENT is between the Python Software Foundation
+ ("PSF"), and the Individual or Organization ("Licensee") accessing and
+ otherwise using this software ("Python") in source or binary form and
+ its associated documentation.
+ .
+ 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+ grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+ analyze, test, perform and/or display publicly, prepare derivative works,
+ distribute, and otherwise use Python alone or in any derivative version,
+ provided, however, that PSF's License Agreement and PSF's notice of copyright,
+ i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Python Software Foundation; All Rights Reserved" are retained in Python alone or
+ in any derivative version prepared by Licensee.
+ .
+ 3. In the event Licensee prepares a derivative work that is based on
+ or incorporates Python or any part thereof, and wants to make
+ the derivative work available to others as provided herein, then
+ Licensee hereby agrees to include in any such work a brief summary of
+ the changes made to Python.
+ .
+ 4. PSF is making Python available to Licensee on an "AS IS"
+ basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+ IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+ INFRINGE ANY THIRD PARTY RIGHTS.
+ .
+ 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+ A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+ OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+ .
+ 6. This License Agreement will automatically terminate upon a material
+ breach of its terms and conditions.
+ .
+ 7. Nothing in this License Agreement shall be deemed to create any
+ relationship of agency, partnership, or joint venture between PSF and
+ Licensee.  This License Agreement does not grant permission to use PSF
+ trademarks or trade name in a trademark sense to endorse or promote
+ products or services of Licensee, or any third party.
+ .
+ 8. By copying, installing or otherwise using Python, Licensee
+ agrees to be bound by the terms and conditions of this License
+ Agreement.
+
+License: public-domain
+ US federal government works
+
+License: CC-BY-SA-3.0
+ Creative Commons Attribution-ShareAlike 3.0 Unported
+ .
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
+ DAMAGES RESULTING FROM ITS USE.
+ .
+ License
+ .
+ THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS
+ CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS
+ PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE
+ WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS
+ PROHIBITED.
+ .
+ BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND
+ AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS
+ LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU
+ THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH
+ TERMS AND CONDITIONS.
+ .
+ 1. Definitions
+ .
+ a. "Adaptation" means a work based upon the Work, or upon the Work and
+ other pre-existing works, such as a translation, adaptation,
+ derivative work, arrangement of music or other alterations of a
+ literary or artistic work, or phonogram or performance and includes
+ cinematographic adaptations or any other form in which the Work may be
+ recast, transformed, or adapted including in any form recognizably
+ derived from the original, except that a work that constitutes a
+ Collection will not be considered an Adaptation for the purpose of
+ this License. For the avoidance of doubt, where the Work is a musical
+ work, performance or phonogram, the synchronization of the Work in
+ timed-relation with a moving image ("synching") will be considered an
+ Adaptation for the purpose of this License.
+ .
+ b. "Collection" means a collection of literary or artistic works, such
+ as encyclopedias and anthologies, or performances, phonograms or
+ broadcasts, or other works or subject matter other than works listed
+ in Section 1(f) below, which, by reason of the selection and
+ arrangement of their contents, constitute intellectual creations, in
+ which the Work is included in its entirety in unmodified form along
+ with one or more other contributions, each constituting separate and
+ independent works in themselves, which together are assembled into a
+ collective whole. A work that constitutes a Collection will not be
+ considered an Adaptation (as defined below) for the purposes of this
+ License.
+ .
+ c. "Creative Commons Compatible License" means a license that is
+ listed at http://creativecommons.org/compatiblelicenses that has been
+ approved by Creative Commons as being essentially equivalent to this
+ License, including, at a minimum, because that license: (i) contains
+ terms that have the same purpose, meaning and effect as the License
+ Elements of this License; and, (ii) explicitly permits the relicensing
+ of adaptations of works made available under that license under this
+ License or a Creative Commons jurisdiction license with the same
+ License Elements as this License.
+ .
+ d. "Distribute" means to make available to the public the original and
+ copies of the Work or Adaptation, as appropriate, through sale or
+ other transfer of ownership.
+ .
+ e. "License Elements" means the following high-level license
+ attributes as selected by Licensor and indicated in the title of this
+ License: Attribution, ShareAlike.
+ .
+ f. "Licensor" means the individual, individuals, entity or entities
+ that offer(s) the Work under the terms of this License.
+ .
+ g. "Original Author" means, in the case of a literary or artistic
+ work, the individual, individuals, entity or entities who created the
+ Work or if no individual or entity can be identified, the publisher;
+ and in addition (i) in the case of a performance the actors, singers,
+ musicians, dancers, and other persons who act, sing, deliver, declaim,
+ play in, interpret or otherwise perform literary or artistic works or
+ expressions of folklore; (ii) in the case of a phonogram the producer
+ being the person or legal entity who first fixes the sounds of a
+ performance or other sounds; and, (iii) in the case of broadcasts, the
+ organization that transmits the broadcast.
+ .
+ h. "Work" means the literary and/or artistic work offered under the
+ terms of this License including without limitation any production in
+ the literary, scientific and artistic domain, whatever may be the mode
+ or form of its expression including digital form, such as a book,
+ pamphlet and other writing; a lecture, address, sermon or other work
+ of the same nature; a dramatic or dramatico-musical work; a
+ choreographic work or entertainment in dumb show; a musical
+ composition with or without words; a cinematographic work to which are
+ assimilated works expressed by a process analogous to cinematography;
+ a work of drawing, painting, architecture, sculpture, engraving or
+ lithography; a photographic work to which are assimilated works
+ expressed by a process analogous to photography; a work of applied
+ art; an illustration, map, plan, sketch or three-dimensional work
+ relative to geography, topography, architecture or science; a
+ performance; a broadcast; a phonogram; a compilation of data to the
+ extent it is protected as a copyrightable work; or a work performed by
+ a variety or circus performer to the extent it is not otherwise
+ considered a literary or artistic work.
+ .
+ i. "You" means an individual or entity exercising rights under this
+ License who has not previously violated the terms of this License with
+ respect to the Work, or who has received express permission from the
+ Licensor to exercise rights under this License despite a previous
+ violation.
+ .
+ j. "Publicly Perform" means to perform public recitations of the Work
+ and to communicate to the public those public recitations, by any
+ means or process, including by wire or wireless means or public
+ digital performances; to make available to the public Works in such a
+ way that members of the public may access these Works from a place and
+ at a place individually chosen by them; to perform the Work to the
+ public by any means or process and the communication to the public of
+ the performances of the Work, including by public digital performance;
+ to broadcast and rebroadcast the Work by any means including signs,
+ sounds or images.
+ .
+ k. "Reproduce" means to make copies of the Work by any means including
+ without limitation by sound or visual recordings and the right of
+ fixation and reproducing fixations of the Work, including storage of a
+ protected performance or phonogram in digital form or other electronic
+ medium.
+ .
+ 2. Fair Dealing Rights. Nothing in this License is intended to reduce,
+ limit, or restrict any uses free from copyright or rights arising from
+ limitations or exceptions that are provided for in connection with the
+ copyright protection under copyright law or other applicable laws.
+ .
+ 3. License Grant. Subject to the terms and conditions of this License,
+ Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
+ perpetual (for the duration of the applicable copyright) license to
+ exercise the rights in the Work as stated below:
+ .
+ a. to Reproduce the Work, to incorporate the Work into one or more
+ Collections, and to Reproduce the Work as incorporated in the
+ Collections;
+ .
+ b. to create and Reproduce Adaptations provided that any such
+ Adaptation, including any translation in any medium, takes reasonable
+ steps to clearly label, demarcate or otherwise identify that changes
+ were made to the original Work. For example, a translation could be
+ marked "The original work was translated from English to Spanish," or
+ a modification could indicate "The original work has been modified.";
+ .
+ c. to Distribute and Publicly Perform the Work including as
+ incorporated in Collections; and,
+ .
+ d. to Distribute and Publicly Perform Adaptations.
+ .
+ e. For the avoidance of doubt:
+ .
+ i. Non-waivable Compulsory License Schemes. In those jurisdictions in
+ which the right to collect royalties through any statutory or
+ compulsory licensing scheme cannot be waived, the Licensor reserves
+ the exclusive right to collect such royalties for any exercise by You
+ of the rights granted under this License;
+ .
+ ii. Waivable Compulsory License Schemes. In those jurisdictions in
+ which the right to collect royalties through any statutory or
+ compulsory licensing scheme can be waived, the Licensor waives the
+ exclusive right to collect such royalties for any exercise by You of
+ the rights granted under this License; and,
+ .
+ iii. Voluntary License Schemes. The Licensor waives the right to
+ collect royalties, whether individually or, in the event that the
+ Licensor is a member of a collecting society that administers
+ voluntary licensing schemes, via that society, from any exercise by
+ You of the rights granted under this License.
+ .
+ The above rights may be exercised in all media and formats whether now
+ known or hereafter devised. The above rights include the right to make
+ such modifications as are technically necessary to exercise the rights
+ in other media and formats. Subject to Section 8(f), all rights not
+ expressly granted by Licensor are hereby reserved.
+ .
+ 4. Restrictions. The license granted in Section 3 above is expressly
+ made subject to and limited by the following restrictions:
+ .
+ a. You may Distribute or Publicly Perform the Work only under the
+ terms of this License. You must include a copy of, or the Uniform
+ Resource Identifier (URI) for, this License with every copy of the
+ Work You Distribute or Publicly Perform. You may not offer or impose
+ any terms on the Work that restrict the terms of this License or the
+ ability of the recipient of the Work to exercise the rights granted to
+ that recipient under the terms of the License. You may not sublicense
+ the Work. You must keep intact all notices that refer to this License
+ and to the disclaimer of warranties with every copy of the Work You
+ Distribute or Publicly Perform. When You Distribute or Publicly
+ Perform the Work, You may not impose any effective technological
+ measures on the Work that restrict the ability of a recipient of the
+ Work from You to exercise the rights granted to that recipient under
+ the terms of the License. This Section 4(a) applies to the Work as
+ incorporated in a Collection, but this does not require the Collection
+ apart from the Work itself to be made subject to the terms of this
+ License. If You create a Collection, upon notice from any Licensor You
+ must, to the extent practicable, remove from the Collection any credit
+ as required by Section 4(c), as requested. If You create an
+ Adaptation, upon notice from any Licensor You must, to the extent
+ practicable, remove from the Adaptation any credit as required by
+ Section 4(c), as requested.
+ .
+ b. You may Distribute or Publicly Perform an Adaptation only under the
+ terms of: (i) this License; (ii) a later version of this License with
+ the same License Elements as this License; (iii) a Creative Commons
+ jurisdiction license (either this or a later license version) that
+ contains the same License Elements as this License (e.g.,
+ Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible
+ License. If you license the Adaptation under one of the licenses
+ mentioned in (iv), you must comply with the terms of that license. If
+ you license the Adaptation under the terms of any of the licenses
+ mentioned in (i), (ii) or (iii) (the "Applicable License"), you must
+ comply with the terms of the Applicable License generally and the
+ following provisions: (I) You must include a copy of, or the URI for,
+ the Applicable License with every copy of each Adaptation You
+ Distribute or Publicly Perform; (II) You may not offer or impose any
+ terms on the Adaptation that restrict the terms of the Applicable
+ License or the ability of the recipient of the Adaptation to exercise
+ the rights granted to that recipient under the terms of the Applicable
+ License; (III) You must keep intact all notices that refer to the
+ Applicable License and to the disclaimer of warranties with every copy
+ of the Work as included in the Adaptation You Distribute or Publicly
+ Perform; (IV) when You Distribute or Publicly Perform the Adaptation,
+ You may not impose any effective technological measures on the
+ Adaptation that restrict the ability of a recipient of the Adaptation
+ from You to exercise the rights granted to that recipient under the
+ terms of the Applicable License. This Section 4(b) applies to the
+ Adaptation as incorporated in a Collection, but this does not require
+ the Collection apart from the Adaptation itself to be made subject to
+ the terms of the Applicable License.
+ .
+ c. If You Distribute, or Publicly Perform the Work or any Adaptations
+ or Collections, You must, unless a request has been made pursuant to
+ Section 4(a), keep intact all copyright notices for the Work and
+ provide, reasonable to the medium or means You are utilizing: (i) the
+ name of the Original Author (or pseudonym, if applicable) if supplied,
+ and/or if the Original Author and/or Licensor designate another party
+ or parties (e.g., a sponsor institute, publishing entity, journal) for
+ attribution ("Attribution Parties") in Licensor's copyright notice,
+ terms of service or by other reasonable means, the name of such party
+ or parties; (ii) the title of the Work if supplied; (iii) to the
+ extent reasonably practicable, the URI, if any, that Licensor
+ specifies to be associated with the Work, unless such URI does not
+ refer to the copyright notice or licensing information for the Work;
+ and (iv), consistent with Section 3(b), in the case of an Adaptation,
+ a credit identifying the use of the Work in the Adaptation (e.g.,
+ "French translation of the Work by Original Author," or "Screenplay
+ based on original Work by Original Author"). The credit required by
+ this Section 4(c) may be implemented in any reasonable manner;
+ provided, however, that in the case of a Adaptation or Collection, at
+ a minimum such credit will appear, if a credit for all contributing
+ authors of the Adaptation or Collection appears, then as part of these
+ credits and in a manner at least as prominent as the credits for the
+ other contributing authors. For the avoidance of doubt, You may only
+ use the credit required by this Section for the purpose of attribution
+ in the manner set out above and, by exercising Your rights under this
+ License, You may not implicitly or explicitly assert or imply any
+ connection with, sponsorship or endorsement by the Original Author,
+ Licensor and/or Attribution Parties, as appropriate, of You or Your
+ use of the Work, without the separate, express prior written
+ permission of the Original Author, Licensor and/or Attribution
+ Parties.
+ .
+ d. Except as otherwise agreed in writing by the Licensor or as may be
+ otherwise permitted by applicable law, if You Reproduce, Distribute or
+ Publicly Perform the Work either by itself or as part of any
+ Adaptations or Collections, You must not distort, mutilate, modify or
+ take other derogatory action in relation to the Work which would be
+ prejudicial to the Original Author's honor or reputation. Licensor
+ agrees that in those jurisdictions (e.g. Japan), in which any exercise
+ of the right granted in Section 3(b) of this License (the right to
+ make Adaptations) would be deemed to be a distortion, mutilation,
+ modification or other derogatory action prejudicial to the Original
+ Author's honor and reputation, the Licensor will waive or not assert,
+ as appropriate, this Section, to the fullest extent permitted by the
+ applicable national law, to enable You to reasonably exercise Your
+ right under Section 3(b) of this License (right to make Adaptations)
+ but not otherwise.
+ .
+ 5. Representations, Warranties and Disclaimer
+ .
+ UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING,
+ LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR
+ WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED,
+ STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF
+ TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY,
+ OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE.
+ SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES,
+ SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
+ .
+ 6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY
+ APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY
+ LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR
+ EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK,
+ EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+ .
+ 7. Termination
+ .
+ a. This License and the rights granted hereunder will terminate
+ automatically upon any breach by You of the terms of this License.
+ Individuals or entities who have received Adaptations or Collections
+ from You under this License, however, will not have their licenses
+ terminated provided such individuals or entities remain in full
+ compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
+ survive any termination of this License.
+ .
+ b. Subject to the above terms and conditions, the license granted here
+ is perpetual (for the duration of the applicable copyright in the
+ Work). Notwithstanding the above, Licensor reserves the right to
+ release the Work under different license terms or to stop distributing
+ the Work at any time; provided, however that any such election will
+ not serve to withdraw this License (or any other license that has
+ been, or is required to be, granted under the terms of this License),
+ and this License will continue in full force and effect unless
+ terminated as stated above.
+ .
+ 8. Miscellaneous
+ .
+ a. Each time You Distribute or Publicly Perform the Work or a
+ Collection, the Licensor offers to the recipient a license to the Work
+ on the same terms and conditions as the license granted to You under
+ this License.
+ .
+ b. Each time You Distribute or Publicly Perform an Adaptation,
+ Licensor offers to the recipient a license to the original Work on the
+ same terms and conditions as the license granted to You under this
+ License.
+ .
+ c. If any provision of this License is invalid or unenforceable under
+ applicable law, it shall not affect the validity or enforceability of
+ the remainder of the terms of this License, and without further action
+ by the parties to this agreement, such provision shall be reformed to
+ the minimum extent necessary to make such provision valid and
+ enforceable.
+ .
+ d. No term or provision of this License shall be deemed waived and no
+ breach consented to unless such waiver or consent shall be in writing
+ and signed by the party to be charged with such waiver or consent.
+ .
+ e. This License constitutes the entire agreement between the parties
+ with respect to the Work licensed here. There are no understandings,
+ agreements or representations with respect to the Work not specified
+ here. Licensor shall not be bound by any additional provisions that
+ may appear in any communication from You. This License may not be
+ modified without the mutual written agreement of the Licensor and You.
+ .
+ f. The rights granted under, and the subject matter referenced, in
+ this License were drafted utilizing the terminology of the Berne
+ Convention for the Protection of Literary and Artistic Works (as
+ amended on September 28, 1979), the Rome Convention of 1961, the WIPO
+ Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty
+ of 1996 and the Universal Copyright Convention (as revised on July 24,
+ 1971). These rights and subject matter take effect in the relevant
+ jurisdiction in which the License terms are sought to be enforced
+ according to the corresponding provisions of the implementation of
+ those treaty provisions in the applicable national law. If the
+ standard suite of rights granted under applicable copyright law
+ includes additional rights not granted under this License, such
+ additional rights are deemed to be included in the License; this
+ License is not intended to restrict the license of any rights under
+ applicable law.
+ .
+ Creative Commons Notice
+ .
+ Creative Commons is not a party to this License, and makes no warranty
+ whatsoever in connection with the Work. Creative Commons will not be
+ liable to You or any party on any legal theory for any damages
+ whatsoever, including without limitation any general, special,
+ incidental or consequential damages arising in connection to this
+ license. Notwithstanding the foregoing two (2) sentences, if Creative
+ Commons has expressly identified itself as the Licensor hereunder, it
+ shall have all rights and obligations of Licensor.
+ .
+ Except for the limited purpose of indicating to the public that the
+ Work is licensed under the CCPL, Creative Commons does not authorize
+ the use by either party of the trademark "Creative Commons" or any
+ related trademark or logo of Creative Commons without the prior
+ written consent of Creative Commons. Any permitted use will be in
+ compliance with Creative Commons' then-current trademark usage
+ guidelines, as may be published on its website or otherwise made
+ available upon request from time to time. For the avoidance of doubt,
+ this trademark restriction does not form part of the License.
+ .
+ Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/gbp.conf b/gbp.conf
new file mode 100644 (file)
index 0000000..8b88dba
--- /dev/null
+++ b/gbp.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+# the default branch for upstream sources:
+upstream-branch = upstream
+# the default branch for the debian patch:
+debian-branch = main
+
+
diff --git a/patches/1029251_ignore_rounding_error.patch b/patches/1029251_ignore_rounding_error.patch
new file mode 100644 (file)
index 0000000..28f2c99
--- /dev/null
+++ b/patches/1029251_ignore_rounding_error.patch
@@ -0,0 +1,39 @@
+Description: Don't fail plot tests on rounding error
+
+(upstream appear to have _disabled_ the affected tests;
+see also test_series)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1029251
+Forwarded: no
+
+--- a/pandas/tests/plotting/frame/test_frame_subplots.py
++++ b/pandas/tests/plotting/frame/test_frame_subplots.py
+@@ -3,6 +3,7 @@
+ import string
+ import numpy as np
++from numpy.testing import assert_array_almost_equal_nulp
+ import pytest
+ from pandas.compat import is_platform_linux
+@@ -435,7 +436,7 @@ class TestDataFramePlotsSubplots:
+         # no subplots
+         df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
+         ax = df.plot.bar(grid=True, log=True)
+-        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
++        assert_array_almost_equal_nulp(ax.yaxis.get_ticklocs(), expected, 4)
+     @pytest.mark.xfail(
+         np_version_gte1p24 and is_platform_linux(),
+@@ -449,8 +450,8 @@ class TestDataFramePlotsSubplots:
+             log=True, subplots=True
+         )
+-        tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
+-        tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
++        assert_array_almost_equal_nulp(ax[0].yaxis.get_ticklocs(), expected, 4)
++        assert_array_almost_equal_nulp(ax[1].yaxis.get_ticklocs(), expected, 4)
+     def test_boxplot_subplots_return_type_default(self, hist_df):
+         df = hist_df
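
For context: the replacement relies on NumPy's NULP-based comparison, which
measures differences in units in the last place rather than requiring
bit-identical floats. A minimal standalone sketch (illustrative, not part of
the packaging):

    import numpy as np
    from numpy.testing import assert_array_almost_equal_nulp

    a = np.array([1.0, 10.0, 1e5])
    b = a + np.spacing(a)  # each element off by exactly one ULP

    # np.testing.assert_array_equal(a, b) would raise AssertionError here;
    # the NULP check tolerates rounding-error-sized differences:
    assert_array_almost_equal_nulp(a, b, nulp=1)
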
diff --git a/patches/1068104_time64.patch b/patches/1068104_time64.patch
new file mode 100644 (file)
index 0000000..20bff4e
--- /dev/null
+++ b/patches/1068104_time64.patch
@@ -0,0 +1,89 @@
+Description: Don't require 32-bit to be time32
+
+Debian armhf/armel (but not i386) now use a 64-bit time_t (time64)
+
+Author: Graham Inggs, Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1068104
+Forwarded: no
+
+--- a/pandas/tests/indexes/datetimes/methods/test_resolution.py
++++ b/pandas/tests/indexes/datetimes/methods/test_resolution.py
+@@ -24,7 +24,7 @@ def test_dti_resolution(request, tz_naiv
+     tz = tz_naive_fixture
+     if freq == "YE" and not IS64 and isinstance(tz, tzlocal):
+         request.applymarker(
+-            pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
++            pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038", strict=False)
+         )
+     idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
+--- a/pandas/tests/tseries/offsets/test_common.py
++++ b/pandas/tests/tseries/offsets/test_common.py
+@@ -143,7 +143,7 @@ def test_apply_out_of_range(request, tz_
+             # If we hit OutOfBoundsDatetime on non-64 bit machines
+             # we'll drop out of the try clause before the next test
+             request.applymarker(
+-                pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
++                pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038", strict=False)
+             )
+         elif (
+             isinstance(tz, tzlocal)
+--- a/pandas/tests/tools/test_to_timedelta.py
++++ b/pandas/tests/tools/test_to_timedelta.py
+@@ -244,7 +244,7 @@ class TestTimedeltas:
+         actual = to_timedelta([val])
+         assert actual[0]._value == np.timedelta64("NaT").astype("int64")
+-    @pytest.mark.xfail(not IS64, reason="Floating point error")
++    @pytest.mark.xfail(not IS64, reason="Floating point error", strict=False)
+     def test_to_timedelta_float(self):
+         # https://github.com/pandas-dev/pandas/issues/25077
+         arr = np.arange(0, 1, 1e-6)[-10:]
+--- a/pandas/tests/io/sas/test_sas7bdat.py
++++ b/pandas/tests/io/sas/test_sas7bdat.py
+@@ -15,6 +15,9 @@ import pandas as pd
+ import pandas._testing as tm
+ from pandas.io.sas.sas7bdat import SAS7BDATReader
++import platform
++import re
++is_platform_x86_32 = bool(re.match("i.?86|x86", platform.uname()[4])) and not IS64
+ @pytest.fixture
+@@ -202,7 +205,7 @@ def test_date_time(datapath):
+     res = df0["DateTimeHi"].astype("M8[us]").dt.round("ms")
+     df0["DateTimeHi"] = res.astype("M8[ms]")
+-    if not IS64:
++    if is_platform_x86_32:
+         # No good reason for this, just what we get on the CI
+         df0.loc[0, "DateTimeHi"] += np.timedelta64(1, "ms")
+         df0.loc[[2, 3], "DateTimeHi"] -= np.timedelta64(1, "ms")
+@@ -297,7 +300,7 @@ def test_max_sas_date(datapath):
+         columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"],
+     )
+-    if not IS64:
++    if is_platform_x86_32:
+         # No good reason for this, just what we get on the CI
+         expected.loc[:, "dt_as_dt"] -= np.timedelta64(1, "ms")
+@@ -340,7 +343,7 @@ def test_max_sas_date_iterator(datapath)
+             columns=col_order,
+         ),
+     ]
+-    if not IS64:
++    if is_platform_x86_32:
+         # No good reason for this, just what we get on the CI
+         expected[0].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
+         expected[1].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
+@@ -371,7 +374,7 @@ def test_null_date(datapath):
+             ),
+         },
+     )
+-    if not IS64:
++    if is_platform_x86_32:
+         # No good reason for this, just what we get on the CI
+         expected.loc[0, "datetimecol"] -= np.timedelta64(1, "ms")
+     tm.assert_frame_equal(df, expected)
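
Two techniques recur above: xfail marks relaxed with strict=False, so that
ports where the failure never materializes (time64 armhf/armel) do not turn
an unexpected pass into an error, and narrowing the blanket "not IS64" check
to 32-bit x86 specifically. A rough self-contained sketch, assuming a pytest
test module:

    import platform
    import re
    import sys

    import pytest

    IS64 = sys.maxsize > 2**32
    # 32-bit x86 specifically, as in the patch; time64 ARM ports won't match
    is_platform_x86_32 = bool(re.match("i.?86|x86", platform.uname()[4])) and not IS64

    @pytest.mark.xfail(not IS64, reason="may overflow past 2038 on time32",
                       strict=False)
    def test_past_2038():
        ...  # body elided; passes on time64 ports, may overflow on time32
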
diff --git a/patches/1068422_ignore_dask_tests.patch b/patches/1068422_ignore_dask_tests.patch
new file mode 100644 (file)
index 0000000..50ff1c5
--- /dev/null
+++ b/patches/1068422_ignore_dask_tests.patch
@@ -0,0 +1,35 @@
+Description: Ignore dask test failures
+
+(to unblock other fixes; these failures probably indicate a real bug,
+but not one in pandas)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1068422
+Forwarded: no
+
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -31,6 +31,7 @@ def df():
+     return DataFrame({"A": [1, 2, 3]})
++@pytest.mark.xfail(reason="dask https://bugs.debian.org/1068422",raises=TypeError,strict=False)
+ def test_dask(df):
+     try:
+         from multiprocessing.pool import ThreadPool
+@@ -53,6 +54,7 @@ def test_dask(df):
+         pd.set_option("compute.use_numexpr", olduse)
++@pytest.mark.xfail(reason="dask https://bugs.debian.org/1068422",raises=TypeError,strict=False)
+ def test_dask_ufunc():
+     # dask sets "compute.use_numexpr" to False, so catch the current value
+     # and ensure to reset it afterwards to avoid impacting other tests
+@@ -72,6 +74,7 @@ def test_dask_ufunc():
+         pd.set_option("compute.use_numexpr", olduse)
++@pytest.mark.xfail(reason="dask https://bugs.debian.org/1068422",raises=TypeError,strict=False)
+ def test_construct_dask_float_array_int_dtype_match_ndarray():
+     # GH#40110 make sure we treat a float-dtype dask array with the same
+     #  rules we would for an ndarray
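
The raises=TypeError argument narrows what counts as the expected failure:
only a TypeError is treated as the known dask breakage, any other exception
still fails the test, and with strict=False a pass is merely reported as
XPASS. Reduced to a sketch:

    import pytest

    @pytest.mark.xfail(reason="dask https://bugs.debian.org/1068422",
                       raises=TypeError, strict=False)
    def test_sketch():
        raise TypeError("stand-in for the dask incompatibility")
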
diff --git a/patches/1088988_xarray_pyreadstat_compat.patch b/patches/1088988_xarray_pyreadstat_compat.patch
new file mode 100644 (file)
index 0000000..5d64e68
--- /dev/null
+++ b/patches/1088988_xarray_pyreadstat_compat.patch
@@ -0,0 +1,35 @@
+Description: Fix pyreadstat and some xarray failures, test others
+
+Tests failing with newer xarray and/or pyreadstat
+
+Author: Richard Shadrach, Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Origin: partly https://github.com/pandas-dev/pandas/pull/60109
+Bug: partly https://github.com/pydata/xarray/issues/9661
+Bug-Debian: https://bugs.debian.org/1088988
+Forwarded: no
+
+--- a/pandas/tests/generic/test_to_xarray.py
++++ b/pandas/tests/generic/test_to_xarray.py
+@@ -52,9 +52,10 @@ class TestDataFrameToXArray:
+         # datetimes w/tz are preserved
+         # column names are lost
+         expected = df.copy()
+-        expected["f"] = expected["f"].astype(
+-            object if not using_infer_string else "string[pyarrow_numpy]"
+-        )
++        # breaks in xarray >= 2024.10.0(?)
++        # debug print
++        r0 = result.to_dataframe()
++        print("expected",expected, expected.index, expected.dtypes, "actual",result, r0, r0.index, r0.dtypes,sep='\n')
+         expected.columns.name = None
+         tm.assert_frame_equal(result.to_dataframe(), expected)
+--- a/pandas/tests/io/test_spss.py
++++ b/pandas/tests/io/test_spss.py
+@@ -162,4 +162,6 @@ def test_spss_metadata(datapath):
+                 "modification_time": datetime.datetime(2015, 2, 6, 14, 33, 36),
+             }
+         )
++    if Version(pyreadstat.__version__) >= Version("1.2.8"):
++        metadata["mr_sets"] = {}
+     assert df.attrs == metadata
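
The pyreadstat hunk is the usual pattern for version-gated expectations.
Outside pandas the same check can be written with packaging.version (the
baseline dict here is elided and illustrative; pandas has an internal
Version equivalent):

    import pytest
    from packaging.version import Version

    pyreadstat = pytest.importorskip("pyreadstat")

    expected = {}  # baseline metadata, contents elided
    if Version(pyreadstat.__version__) >= Version("1.2.8"):
        expected["mr_sets"] = {}  # field added by newer pyreadstat
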
diff --git a/patches/2p1_openpyxl_errors.patch b/patches/2p1_openpyxl_errors.patch
new file mode 100644 (file)
index 0000000..3ff531b
--- /dev/null
+++ b/patches/2p1_openpyxl_errors.patch
@@ -0,0 +1,34 @@
+Description: Fix test failures when xlsxwriter is not installed
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/excel/test_openpyxl.py
++++ b/pandas/tests/io/excel/test_openpyxl.py
+@@ -124,13 +124,14 @@ def test_engine_kwargs_append_invalid(ex
+                 DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
++@td.skip_if_no("xlsxwriter")
+ @pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
+ def test_engine_kwargs_append_data_only(ext, data_only, expected):
+     # GH 43445
+     # tests whether the data_only engine_kwarg actually works well for
+     # openpyxl's load_workbook
+     with tm.ensure_clean(ext) as f:
+-        DataFrame(["=1+1"]).to_excel(f)
++        DataFrame(["=1+1"]).to_excel(f, engine="xlsxwriter") # with openpyxl here, data_only=True gives None/np.nan not 0
+         with ExcelWriter(
+             f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only}
+         ) as writer:
+--- a/pandas/tests/io/excel/test_writers.py
++++ b/pandas/tests/io/excel/test_writers.py
+@@ -1420,6 +1420,8 @@ class TestExcelWriterEngineTests:
+                     assert isinstance(writer, _XlsxWriter)
+                 else:
+                     assert isinstance(writer, klass)
++                    # openpyxl raises on closing if no sheets are written
++                    DataFrame().to_excel(writer, sheet_name="Sheet1")
+     def test_ExcelWriter_dispatch_raises(self):
+         with pytest.raises(ValueError, match="No engine"):
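
td.skip_if_no is a pandas-internal decorator; in plain pytest the same guard
is pytest.importorskip. A rough equivalent of the guarded write, with an
illustrative file name (the point being that xlsxwriter caches a formula
result, while a file written by openpyxl reads back as None under
data_only=True, per the patch comment):

    import pandas as pd
    import pytest

    def test_formula_written_as_formula(tmp_path):
        pytest.importorskip("xlsxwriter")  # same intent as @td.skip_if_no
        path = tmp_path / "demo.xlsx"
        pd.DataFrame(["=1+1"]).to_excel(path, engine="xlsxwriter")
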
diff --git a/patches/accept_system_tzdata.patch b/patches/accept_system_tzdata.patch
new file mode 100644 (file)
index 0000000..fde9c27
--- /dev/null
+++ b/patches/accept_system_tzdata.patch
@@ -0,0 +1,30 @@
+Description: Don't require python3-tzdata
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1043968
+Forwarded: no
+
+--- a/pandas/_libs/tslibs/timezones.pyx
++++ b/pandas/_libs/tslibs/timezones.pyx
+@@ -65,9 +65,6 @@ cdef bint is_utc_zoneinfo(tzinfo tz):
+             utc_zoneinfo = ZoneInfo("UTC")
+         except zoneinfo.ZoneInfoNotFoundError:
+             return False
+-        # Warn if tzdata is too old, even if there is a system tzdata to alert
+-        # users about the mismatch between local/system tzdata
+-        import_optional_dependency("tzdata", errors="warn", min_version="2022.7")
+     return tz is utc_zoneinfo
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -31,8 +31,7 @@ dependencies = [
+   "numpy>=1.23.2; python_version=='3.11'",
+   "numpy>=1.26.0; python_version>='3.12'",
+   "python-dateutil>=2.8.2",
+-  "pytz>=2020.1",
+-  "tzdata>=2022.7"
++  "pytz>=2020.1"
+ ]
+ classifiers = [
+     'Development Status :: 5 - Production/Stable',
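
Background for the change: zoneinfo consults the TZPATH search path first
(/usr/share/zoneinfo on Debian) and uses the tzdata module only as a
fallback, so on Debian the module is effectively redundant. The fallback is
standard-library behaviour:

    from zoneinfo import ZoneInfo, ZoneInfoNotFoundError

    try:
        # resolved via TZPATH (system tzdata) or, failing that, the
        # optional tzdata module
        tz = ZoneInfo("Europe/London")
    except ZoneInfoNotFoundError:
        tz = None  # neither source available
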
diff --git a/patches/add_missing_importorskip.patch b/patches/add_missing_importorskip.patch
new file mode 100644 (file)
index 0000000..6f7ee07
--- /dev/null
+++ b/patches/add_missing_importorskip.patch
@@ -0,0 +1,55 @@
+Description: Skip tests when dependencies are missing
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/parser/conftest.py
++++ b/pandas/tests/io/parser/conftest.py
+@@ -12,6 +12,7 @@ from pandas import (
+     read_table,
+ )
+ import pandas._testing as tm
++import pandas.util._test_decorators as td
+ class BaseParser:
+@@ -118,7 +119,7 @@ _pyarrowParser = PyArrowParser
+ _py_parsers_only = [_pythonParser]
+ _c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
+-_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)]
++_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=[pytest.mark.single_cpu, td.skip_if_no("pyarrow")])]
+ _all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]
+@@ -182,8 +183,8 @@ def _get_all_parser_float_precision_comb
+             parser = parser.values[0]
+         for precision in parser.float_precision_choices:
+             # Re-wrap in pytest.param for pyarrow
+-            mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else ()
+-            param = pytest.param((parser(), precision), marks=mark)
++            marks = [pytest.mark.single_cpu, td.skip_if_no("pyarrow")] if parser.engine == "pyarrow" else ()
++            param = pytest.param((parser(), precision), marks=marks)
+             params.append(param)
+             ids.append(f"{parser_id}-{precision}")
+--- a/pandas/tests/io/formats/style/test_bar.py
++++ b/pandas/tests/io/formats/style/test_bar.py
+@@ -347,6 +347,7 @@ def test_styler_bar_with_NA_values():
+     assert expected_substring in html_output2
++@td.skip_if_no("pyarrow")
+ def test_style_bar_with_pyarrow_NA_values():
+     data = """name,age,test1,test2,teacher
+         Adam,15,95.0,80,Ashby
+--- a/pandas/tests/series/test_api.py
++++ b/pandas/tests/series/test_api.py
+@@ -171,6 +171,7 @@ class TestSeriesMisc:
+     def test_inspect_getmembers(self):
+         # GH38782
+         td.versioned_importorskip("jinja2")
++        td.versioned_importorskip("pyarrow")
+         ser = Series(dtype=object)
+         msg = "Series._data is deprecated"
+         with tm.assert_produces_warning(
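
Marks attached inside pytest.param apply to that parameter alone, so only
the pyarrow cases are skipped when the module is missing and the other
engines still run. A plain-pytest version of the pattern, with td.skip_if_no
replaced by a skipif built on importlib:

    import importlib.util

    import pytest

    needs_pyarrow = pytest.mark.skipif(
        importlib.util.find_spec("pyarrow") is None, reason="pyarrow not installed"
    )

    @pytest.mark.parametrize(
        "engine",
        ["c", "python", pytest.param("pyarrow", marks=needs_pyarrow)],
    )
    def test_engine(engine):
        assert isinstance(engine, str)
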
diff --git a/patches/allow_no_matplotlib.patch b/patches/allow_no_matplotlib.patch
new file mode 100644 (file)
index 0000000..b153124
--- /dev/null
+++ b/patches/allow_no_matplotlib.patch
@@ -0,0 +1,24 @@
+Description: Don't try to run matplotlib-using tests without it
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/formats/style/test_style.py
++++ b/pandas/tests/io/formats/style/test_style.py
+@@ -14,6 +14,7 @@ from pandas import (
+     option_context,
+ )
+ import pandas._testing as tm
++import pandas.util._test_decorators as td
+ jinja2 = pytest.importorskip("jinja2")
+ from pandas.io.formats.style import (  # isort:skip
+@@ -756,7 +757,7 @@ class TestStyler:
+         df.style.map(color_negative_red, subset=pct_subset)
+     @pytest.mark.parametrize(
+-        "stylefunc", ["background_gradient", "bar", "text_gradient"]
++        "stylefunc", [pytest.param("background_gradient", marks=td.skip_if_no("matplotlib")), "bar", pytest.param("text_gradient", marks=td.skip_if_no("matplotlib"))]
+     )
+     def test_subset_for_boolean_cols(self, stylefunc):
+         # GH47838
diff --git a/patches/allow_no_openpyxl.patch b/patches/allow_no_openpyxl.patch
new file mode 100644 (file)
index 0000000..9164b5f
--- /dev/null
+++ b/patches/allow_no_openpyxl.patch
@@ -0,0 +1,25 @@
+Description: Mark tests that need openpyxl
+
+(These do the usual loop through engine/read_ext but
+actually use .xlsx every time)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/excel/test_readers.py
++++ b/pandas/tests/io/excel/test_readers.py
+@@ -1671,12 +1671,14 @@ class TestExcelFileRead:
+         expected = pd.read_excel("test1" + read_ext, engine=engine)
+         tm.assert_frame_equal(result, expected)
++    @td.skip_if_no("openpyxl")
+     def test_read_excel_header_index_out_of_range(self, engine):
+         # GH#43143
+         with open("df_header_oob.xlsx", "rb") as f:
+             with pytest.raises(ValueError, match="exceeds maximum"):
+                 pd.read_excel(f, header=[0, 1])
++    @td.skip_if_no("openpyxl")
+     @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
+     def test_header_with_index_col(self, filename):
+         # GH 33476
diff --git a/patches/armel_ignore_nonwarning.patch b/patches/armel_ignore_nonwarning.patch
new file mode 100644 (file)
index 0000000..772b104
--- /dev/null
+++ b/patches/armel_ignore_nonwarning.patch
@@ -0,0 +1,53 @@
+Description: Don't require a warning that armel's numpy doesn't emit
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no (this version requires dpkg)
+
+--- a/pandas/tests/apply/test_str.py
++++ b/pandas/tests/apply/test_str.py
+@@ -70,6 +70,12 @@ def test_apply_np_reducer(op, how):
+ @pytest.mark.parametrize("how", ["transform", "apply"])
+ def test_apply_np_transformer(float_frame, op, how):
+     # GH 39116
++    # armel numpy currently doesn't have the invalid log/sqrt warning (see 1.4.3-1 build log,
++    # possibly the same underlying issue as statsmodels https://bugs.debian.org/956882)
++    # using nullcontext() instead of warn=None to not start failing if this ever gets fixed
++    import subprocess
++    import contextlib
++    debian_arch = subprocess.run(["dpkg","--print-architecture"],capture_output=True).stdout
+     # float_frame will _usually_ have negative values, which will
+     #  trigger the warning here, but let's put one in just to be sure
+@@ -78,7 +84,7 @@ def test_apply_np_transformer(float_fram
+     if op in ["log", "sqrt"]:
+         warn = RuntimeWarning
+-    with tm.assert_produces_warning(warn, check_stacklevel=False):
++    with (contextlib.nullcontext() if (debian_arch==b'armel\n') else tm.assert_produces_warning(warn, check_stacklevel=False)):
+         # float_frame fixture is defined in conftest.py, so we don't check the
+         # stacklevel as otherwise the test would fail.
+         result = getattr(float_frame, how)(op)
+--- a/pandas/tests/io/parser/test_c_parser_only.py
++++ b/pandas/tests/io/parser/test_c_parser_only.py
+@@ -63,6 +63,12 @@ def test_delim_whitespace_custom_termina
+     expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
+     tm.assert_frame_equal(df, expected)
++# armel numpy currently doesn't have some invalid warnings (see 2.0.3+dfsg-3 build log,
++# possibly the same underlying issue as statsmodels https://bugs.debian.org/956882)
++# using nullcontext() instead of warn=None to not start failing if this ever gets fixed
++import subprocess
++import contextlib
++debian_arch = subprocess.run(["dpkg","--print-architecture"],capture_output=True).stdout
+ def test_dtype_and_names_error(c_parser_only):
+     # see gh-8833: passing both dtype and names
+@@ -98,7 +104,7 @@ nan 2
+     # fallback casting, but not castable
+     warning = RuntimeWarning if np_version_gte1p24 else None
+     with pytest.raises(ValueError, match="cannot safely convert"):
+-        with tm.assert_produces_warning(warning, check_stacklevel=False):
++        with (contextlib.nullcontext() if (debian_arch==b'armel\n') else tm.assert_produces_warning(warning, check_stacklevel=False)):
+             parser.read_csv(
+                 StringIO(data),
+                 sep=r"\s+",
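
The nullcontext trick keeps the with statement unconditional and swaps only
the context manager, so the test body is not duplicated across branches. The
core of the pattern, with pytest.warns standing in for
tm.assert_produces_warning:

    import contextlib

    import numpy as np
    import pytest

    expect_warning = True  # False on armel per the patch

    with (pytest.warns(RuntimeWarning) if expect_warning
          else contextlib.nullcontext()):
        np.log(np.array([-1.0]))  # "invalid value" RuntimeWarning, where emitted
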
diff --git a/patches/blosc_nonstrict_xfail.patch b/patches/blosc_nonstrict_xfail.patch
new file mode 100644 (file)
index 0000000..fe696d9
--- /dev/null
+++ b/patches/blosc_nonstrict_xfail.patch
@@ -0,0 +1,19 @@
+Description: Use non-strict xfail
+
+Upstream strict-xfailed this test instead of changing the expected message.
+That doesn't work here because the test fails only during the build, not
+under autopkgtest, where a strict xfail would turn the pass into an error
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/pytables/test_file_handling.py
++++ b/pandas/tests/io/pytables/test_file_handling.py
+@@ -270,7 +270,7 @@ def test_complibs(tmp_path, lvl, lib, re
+     # GH14478
+     if PY311 and is_platform_linux() and lib == "blosc2" and lvl != 0:
+         request.applymarker(
+-            pytest.mark.xfail(reason=f"Fails for {lib} on Linux and PY > 3.11")
++            pytest.mark.xfail(reason=f"Fails for {lib} on Linux and PY > 3.11", strict=False)
+         )
+     df = DataFrame(
+         np.ones((30, 4)), columns=list("ABCD"), index=np.arange(30).astype(np.str_)
diff --git a/patches/contributor_list_not_in_tarball.patch b/patches/contributor_list_not_in_tarball.patch
new file mode 100644 (file)
index 0000000..460313b
--- /dev/null
+++ b/patches/contributor_list_not_in_tarball.patch
@@ -0,0 +1,28 @@
+Description: Don't try to read a contributor list from the git log
+
+Debian packages are built from tarballs, so there isn't a git log.
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: not-needed
+
+--- a/doc/sphinxext/contributors.py
++++ b/doc/sphinxext/contributors.py
+@@ -14,10 +14,8 @@ use::
+ While the v0.23.1 tag does not exist, that will use the HEAD of the
+ branch as the end of the revision range.
+ """
+-from announce import build_components
+ from docutils import nodes
+ from docutils.parsers.rst import Directive
+-import git
+ class ContributorsDirective(Directive):
+@@ -25,6 +23,7 @@ class ContributorsDirective(Directive):
+     name = "contributors"
+     def run(self):
++        return [nodes.paragraph(), nodes.Text("For contributors, please see /usr/share/doc/contributors_list.txt or https://github.com/pandas-dev/pandas/graphs/contributors")]
+         range_ = self.arguments[0]
+         if range_.endswith("x..HEAD"):
+             return [nodes.paragraph(), nodes.bullet_list()]
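
The replacement run() short-circuits the directive into static docutils
nodes instead of walking the git history. For reference, a minimal directive
of this shape (names illustrative):

    from docutils import nodes
    from docutils.parsers.rst import Directive

    class StaticNotice(Directive):
        """A directive whose run() returns prebuilt nodes rather than
        content computed at build time."""

        def run(self):
            para = nodes.paragraph()
            para += nodes.Text("See the contributors list shipped separately.")
            return [para]
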
diff --git a/patches/deb_disable_analytics.patch b/patches/deb_disable_analytics.patch
new file mode 100644 (file)
index 0000000..2bc2606
--- /dev/null
@@ -0,0 +1,88 @@
+Description: Avoid privacy breach by analytics
+
+Author: Yaroslav Halchenko <debian@onerussian.com>,
+        Andreas Tille <tille@debian.org>,
+        Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: not-needed
+
+--- a/pandas/tests/io/data/html/spam.html
++++ b/pandas/tests/io/data/html/spam.html
+@@ -27,45 +27,9 @@
+ <link rel="stylesheet" href="/ndb/static/css/main.css" />
+-              <script type="text/JavaScript">
+-               var _gaq = _gaq || [];
+-               // NAL
+-                _gaq.push(['_setAccount', 'UA-28627214-1']);
+-                _gaq.push(['_setDomainName', 'nal.usda.gov']);
+-                _gaq.push(['_setAllowLinker', true]);
+-                _gaq.push(['_trackPageview']);
+-               //
+-              // _gaq.push(['_setAccount', 'UA-3876418-1']);
+-              //  _gaq.push(['_trackPageview']);
+-                // for NDB
+-                _gaq.push(['_setAccount', 'UA-36442725-1']);
+-                _gaq.push(['_trackPageview']);
+-                      // USDA servers
+-                _gaq.push(['_setAccount', 'UA-466807-3']);
+-                _gaq.push(['_setDomainName', 'usda.gov']);
+-                _gaq.push(['_setAllowLinker', true]);
+-                _gaq.push(['_trackPageview']);
+-                //
+-                _gaq.push(['a._setAccount', 'UA-27627304-18']);
+-                _gaq.push(['a._setDomainName', 'usda.gov']);
+-                _gaq.push(['a._setAllowLinker', true]);
+-                _gaq.push(['a._trackPageview']);
+-                      //
+-                _gaq.push(['b._setAccount', 'UA-27627304-1']);
+-                _gaq.push(['b._setDomainName', 'usda.gov']);
+-                _gaq.push(['b._setAllowLinker', true]);
+-                _gaq.push(['b._trackPageview']);
+-
+-                (function() {
+-                  var ga = document.createElement('script'); ga.type =
+-              'text/javascript'; ga.async = true;
+-                  ga.src = ('https:' == document.location.protocol ? 'https://ssl' :
+-              'http://www') + '.google-analytics.com/ga.js';
+-                  var s = document.getElementsByTagName('script')[0];
+-              s.parentNode.insertBefore(ga, s);
+-                })();
+-      </script>
+-
++<!-- google analytics snippet was completely removed by Debian maintainers.
++     See http://lintian.debian.org/tags/privacy-breach-google-adsense.html
++     for more information -->
+         <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+@@ -794,4 +758,4 @@ handler: function() {this.cancel();},
+                       &nbsp;&nbsp;Software v.1.2.2
+               </div>
+     </body>
+-</html>
+\ No newline at end of file
++</html>
+--- a/web/pandas/_templates/layout.html
++++ b/web/pandas/_templates/layout.html
+@@ -1,7 +1,6 @@
+ <!DOCTYPE html>
+ <html>
+     <head>
+-        <script defer data-domain="pandas.pydata.org" src="https://views.scientific-python.org/js/script.js"></script>
+         <title>pandas - Python Data Analysis Library</title>
+         <meta charset="utf-8">
+         <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -243,10 +243,6 @@ html_theme_options = {
+     "footer_start": ["pandas_footer", "sphinx-version"],
+     "github_url": "https://github.com/pandas-dev/pandas",
+     "twitter_url": "https://twitter.com/pandas_dev",
+-    "analytics": {
+-        "plausible_analytics_domain": "pandas.pydata.org",
+-        "plausible_analytics_url": "https://views.scientific-python.org/js/script.js",
+-    },
+     "logo": {"image_dark": "https://pandas.pydata.org/static/img/pandas_white.svg"},
+     "navbar_align": "left",
+     "navbar_end": ["version-switcher", "theme-switcher", "navbar-icon-links"],
diff --git a/patches/deb_doc_donotoverride_PYTHONPATH.patch b/patches/deb_doc_donotoverride_PYTHONPATH.patch
new file mode 100644 (file)
index 0000000..0d1081e
--- /dev/null
@@ -0,0 +1,21 @@
+Description: Don't try to import from the source directory
+
+Needed as we build the extension modules elsewhere
+
+Author: Yaroslav Halchenko
+Forwarded: not-needed
+
+--- a/doc/make.py
++++ b/doc/make.py
+@@ -368,8 +368,9 @@ def main():
+     # external libraries (namely Sphinx) to compile this module and resolve
+     # the import of `python_path` correctly. The latter is used to resolve
+     # the import within the module, injecting it into the global namespace
+-    os.environ["PYTHONPATH"] = args.python_path
+-    sys.path.insert(0, args.python_path)
++    # Debian: we set it outside
++    #os.environ["PYTHONPATH"] = args.python_path
++    #sys.path.insert(0, args.python_path)
+     globals()["pandas"] = importlib.import_module("pandas")
+     # Set the matplotlib backend to the non-interactive Agg backend for all
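
The override can be dropped because the packaging sets the path up before
invoking doc/make.py; the general mechanism is the usual pair of sys.path
(current process) and PYTHONPATH (child processes such as Sphinx). Sketch,
with build_dir as a placeholder:

    import importlib
    import os
    import sys

    build_dir = "/path/to/built/pandas-tree"  # placeholder
    sys.path.insert(0, build_dir)          # imports in this process
    os.environ["PYTHONPATH"] = build_dir   # imports in spawned processes
    pandas = importlib.import_module("pandas")
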
diff --git a/patches/deb_nonversioneer_version.patch b/patches/deb_nonversioneer_version.patch
new file mode 100644 (file)
index 0000000..bb712d7
--- /dev/null
@@ -0,0 +1,57 @@
+Description: Don't try to use git to find the version number
+
+Needed as Debian buildds use tarballs
+
+Author: Yaroslav Halchenko
+Forwarded: not-needed
+
+--- a/pandas/__init__.py
++++ b/pandas/__init__.py
+@@ -186,12 +186,7 @@ try:
+     _built_with_meson = True
+ except ImportError:
+-    from pandas._version import get_versions
+-
+-    v = get_versions()
+-    __version__ = v.get("closest-tag", v["version"])
+-    __git_version__ = v.get("full-revisionid")
+-    del get_versions, v
++    from .__version import version as __version__
+ # GH#55043 - deprecation of the data_manager option
+ if "PANDAS_DATA_MANAGER" in os.environ:
+--- a/pandas/tests/api/test_api.py
++++ b/pandas/tests/api/test_api.py
+@@ -193,8 +193,6 @@ class TestPDApi(Base):
+         "_testing",
+         "_typing",
+     ]
+-    if not pd._built_with_meson:
+-        private_modules.append("_version")
+     def test_api(self):
+         checkthese = (
+--- a/pandas/tests/test_common.py
++++ b/pandas/tests/test_common.py
+@@ -159,7 +159,7 @@ def test_standardize_mapping():
+     dd = collections.defaultdict(list)
+     assert isinstance(com.standardize_mapping(dd), partial)
+-
++@pytest.mark.xfail(reason="deb_nonversioneer_version patch")
+ def test_git_version():
+     # GH 21295
+     git_version = pd.__git_version__
+--- a/pandas/tests/util/test_show_versions.py
++++ b/pandas/tests/util/test_show_versions.py
+@@ -55,9 +55,6 @@ def test_show_versions_console(capsys):
+     # check header
+     assert "INSTALLED VERSIONS" in result
+-    # check full commit hash
+-    assert re.search(r"commit\s*:\s[0-9a-f]{40}\n", result)
+-
+     # check required dependency
+     # 2020-12-09 npdev has "dirty" in the tag
+     # 2022-05-25 npdev released with RC wo/ "dirty".
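
The fallback import assumes a static pandas/__version.py dropped in at build
time; in the simplest case the generated module is a single assignment
(contents hypothetical):

    # pandas/__version.py, generated during the Debian build
    version = "2.2.3"

    # consumed by pandas/__init__.py as:
    # from .__version import version as __version__
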
diff --git a/patches/find_test_data.patch b/patches/find_test_data.patch
new file mode 100644 (file)
index 0000000..0a82963
--- /dev/null
+++ b/patches/find_test_data.patch
@@ -0,0 +1,143 @@
+Description: Allow tests to use the data files in the source tree
+
+We don't ship these in the package,
+but do want to run the tests that use them
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: https://github.com/pandas-dev/pandas/issues/54907
+
+--- a/pandas/conftest.py
++++ b/pandas/conftest.py
+@@ -34,6 +34,7 @@ from typing import (
+     TYPE_CHECKING,
+     Callable,
+ )
++import argparse
+ from dateutil.tz import (
+     tzlocal,
+@@ -114,6 +115,7 @@ def pytest_addoption(parser) -> None:
+         action="store_false",
+         help="Don't fail if a test is skipped for missing data file.",
+     )
++    parser.addoption("--deb-data-root-dir", action="store", help=argparse.SUPPRESS)  # for internal use of the Debian CI infrastructure, may change without warning.  Security note: test_pickle can run arbitrary code from this directory
+ def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None:
+@@ -1098,7 +1100,7 @@ def strict_data_files(pytestconfig):
+ @pytest.fixture
+-def datapath(strict_data_files: str) -> Callable[..., str]:
++def datapath(strict_data_files: str, pytestconfig) -> Callable[..., str]:
+     """
+     Get the path to a data file.
+@@ -1116,7 +1118,9 @@ def datapath(strict_data_files: str) ->
+     ValueError
+         If the path doesn't exist and the --no-strict-data-files option is not set.
+     """
+-    BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
++    BASE_PATH = pytestconfig.getoption("--deb-data-root-dir", default=None)
++    if BASE_PATH is None:
++        BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
+     def deco(*args):
+         path = os.path.join(BASE_PATH, *args)
+--- a/pandas/tests/util/test_util.py
++++ b/pandas/tests/util/test_util.py
+@@ -35,6 +35,7 @@ def test_datapath_missing(datapath):
+         datapath("not_a_file")
++@pytest.mark.xfail(reason="--deb-data-root-dir intentionally breaks this", strict=False)
+ def test_datapath(datapath):
+     args = ("io", "data", "csv", "iris.csv")
+--- a/pandas/tests/io/test_pickle.py
++++ b/pandas/tests/io/test_pickle.py
+@@ -115,7 +115,7 @@ def test_pickles(datapath):
+         pytest.skip("known failure on non-little endian")
+     # For loop for compat with --strict-data-files
+-    for legacy_pickle in Path(__file__).parent.glob("data/legacy_pickle/*/*.p*kl*"):
++    for legacy_pickle in Path(datapath("io", "data", "legacy_pickle")).glob("*/*.p*kl*"):
+         legacy_pickle = datapath(legacy_pickle)
+         data = pd.read_pickle(legacy_pickle)
+@@ -627,7 +627,7 @@ def test_pickle_big_dataframe_compressio
+ def test_pickle_frame_v124_unpickle_130(datapath):
+     # GH#42345 DataFrame created in 1.2.x, unpickle in 1.3.x
+     path = datapath(
+-        Path(__file__).parent,
++        "io",
+         "data",
+         "legacy_pickle",
+         "1.2.4",
+--- a/pandas/tests/io/formats/style/test_html.py
++++ b/pandas/tests/io/formats/style/test_html.py
+@@ -44,10 +44,10 @@ def tpl_table(env):
+     return env.get_template("html_table.tpl")
+-def test_html_template_extends_options():
++def test_html_template_extends_options(datapath):
+     # make sure if templates are edited tests are updated as are setup fixtures
+     # to understand the dependency
+-    with open("pandas/io/formats/templates/html.tpl", encoding="utf-8") as file:
++    with open(datapath("../io/formats/templates/html.tpl"), encoding="utf-8") as file:
+         result = file.read()
+     assert "{% include html_style_tpl %}" in result
+     assert "{% include html_table_tpl %}" in result
+--- a/pandas/tests/io/xml/conftest.py
++++ b/pandas/tests/io/xml/conftest.py
+@@ -4,8 +4,8 @@ import pytest
+ @pytest.fixture
+-def xml_data_path():
+-    return Path(__file__).parent.parent / "data" / "xml"
++def xml_data_path(datapath):
++    return Path(datapath("io", "data", "xml"))
+ @pytest.fixture
+--- a/pandas/tests/io/xml/test_xml.py
++++ b/pandas/tests/io/xml/test_xml.py
+@@ -487,13 +487,13 @@ def test_empty_string_etree(val):
+             read_xml(BytesIO(val), parser="etree")
+-def test_wrong_file_path(parser):
++def test_wrong_file_path(parser, datapath):
+     msg = (
+         "Passing literal xml to 'read_xml' is deprecated and "
+         "will be removed in a future version. To read from a "
+         "literal string, wrap it in a 'StringIO' object."
+     )
+-    filename = os.path.join("data", "html", "books.xml")
++    filename = os.path.join(datapath("io", "data", "html"), "books.xml")
+     with pytest.raises(
+         FutureWarning,
+@@ -1358,17 +1358,16 @@ def test_stylesheet_with_etree(kml_cta_r
+ @pytest.mark.parametrize("val", ["", b""])
+-def test_empty_stylesheet(val):
++def test_empty_stylesheet(val, datapath):
+     pytest.importorskip("lxml")
+     msg = (
+         "Passing literal xml to 'read_xml' is deprecated and "
+         "will be removed in a future version. To read from a "
+         "literal string, wrap it in a 'StringIO' object."
+     )
+-    kml = os.path.join("data", "xml", "cta_rail_lines.kml")
++    kml = datapath("io", "data", "xml", "cta_rail_lines.kml")
+-    with pytest.raises(FutureWarning, match=msg):
+-        read_xml(kml, stylesheet=val)
++    read_xml(kml, stylesheet=val)
+ # ITERPARSE
diff --git a/patches/fix_overly_arch_specific_xfails.patch b/patches/fix_overly_arch_specific_xfails.patch
new file mode 100644 (file)
index 0000000..ae84a13
--- /dev/null
@@ -0,0 +1,73 @@
+Description: Fix arch-specific upstream xfails
+
+We test on more architectures than upstream, so upstream's xfails are
+not always correct everywhere.  On tests known to fail, the markers are
+widened as follows:
+arm64 xfail -> xfail on all non-x86 architectures
+x86-only or unconditional strict xfail -> unconditional non-strict xfail
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug: https://github.com/pandas-dev/pandas/issues/38921, https://github.com/pandas-dev/pandas/issues/38798, https://github.com/pandas-dev/pandas/issues/41740, https://github.com/numpy/numpy/issues/19146
+Forwarded: no
+
+--- a/pandas/tests/io/parser/test_c_parser_only.py
++++ b/pandas/tests/io/parser/test_c_parser_only.py
+@@ -17,6 +17,7 @@ import tarfile
+ import numpy as np
+ import pytest
++from pandas.compat import IS64
+ from pandas.compat.numpy import np_version_gte1p24
+ from pandas.errors import (
+     ParserError,
+@@ -29,6 +30,9 @@ from pandas import (
+     concat,
+ )
+ import pandas._testing as tm
++import platform
++import re
++is_platform_x86 = bool(re.match("i.?86|x86",platform.uname()[4]))
+ @pytest.mark.parametrize(
+@@ -633,11 +637,13 @@ def test_float_precision_options(c_parse
+     tm.assert_frame_equal(df, df2)
+-    df3 = parser.read_csv(StringIO(s), float_precision="legacy")
+-
+-    assert not df.iloc[0, 0] == df3.iloc[0, 0]
+-
+     msg = "Unrecognized float_precision option: junk"
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(s), float_precision="junk")
++
++    df3 = parser.read_csv(StringIO(s), float_precision="legacy")
++    if is_platform_x86 and (not IS64) and (df.iloc[0, 0] == df3.iloc[0, 0]):
++        pytest.xfail(reason="maybe x87 extra precision")
++
++    assert not df.iloc[0, 0] == df3.iloc[0, 0]
+--- a/pandas/tests/window/test_rolling.py
++++ b/pandas/tests/window/test_rolling.py
+@@ -10,7 +10,11 @@ from pandas.compat import (
+     IS64,
+     is_platform_arm,
+     is_platform_power,
++    IS64,
+ )
++import platform
++import re
++is_platform_x86 = bool(re.match("i.?86|x86", platform.uname()[4]))
+ from pandas import (
+     DataFrame,
+@@ -1176,7 +1180,8 @@ def test_rolling_sem(frame_or_series):
+ @pytest.mark.xfail(
+-    is_platform_arm() or is_platform_power(),
++    not (is_platform_x86 and IS64),
++    strict=False,
+     reason="GH 38921",
+ )
+ @pytest.mark.parametrize(
diff --git a/patches/fix_random_seeds.patch b/patches/fix_random_seeds.patch
new file mode 100644 (file)
index 0000000..81b9d22
--- /dev/null
@@ -0,0 +1,175 @@
+Description: Use fixed seeds for reproducible pseudorandomness
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
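+Both generators need seeding: the docs mostly draw from np.random, but
+some code paths (e.g. bootstrap_plot, which uses random.sample) draw
+from the stdlib generator.  A minimal sketch:
+
+    import random
+    import numpy as np
+    random.seed(123456)     # stdlib generator (random.sample, random.shuffle)
+    np.random.seed(123456)  # NumPy global generator
+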
+--- a/doc/source/getting_started/comparison/comparison_with_r.rst
++++ b/doc/source/getting_started/comparison/comparison_with_r.rst
+@@ -237,6 +237,7 @@ In pandas we may use :meth:`~pandas.pivo
+    import random
+    import string
++   random.seed(123456) # for reproducibility
+    baseball = pd.DataFrame(
+        {
+--- a/doc/source/user_guide/advanced.rst
++++ b/doc/source/user_guide/advanced.rst
+@@ -590,6 +590,7 @@ they need to be sorted. As with any inde
+    import random
++   random.seed(123456) # for reproducibility
+    random.shuffle(tuples)
+    s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples))
+    s
+--- a/doc/source/user_guide/visualization.rst
++++ b/doc/source/user_guide/visualization.rst
+@@ -1086,6 +1086,7 @@ are what constitutes the bootstrap plot.
+    :suppress:
+    np.random.seed(123456)
++   random.seed(123456) # for reproducibility - bootstrap_plot uses random.sample
+ .. ipython:: python
+--- a/pandas/plotting/_core.py
++++ b/pandas/plotting/_core.py
+@@ -604,6 +604,7 @@ def boxplot_frame_groupby(
+     .. plot::
+         :context: close-figs
++        >>> np.random.seed(1234)
+         >>> import itertools
+         >>> tuples = [t for t in itertools.product(range(1000), range(4))]
+         >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
+@@ -1328,6 +1329,7 @@ class PlotAccessor(PandasObject):
+         .. plot::
+             :context: close-figs
++            >>> np.random.seed(1234)
+             >>> data = np.random.randn(25, 4)
+             >>> df = pd.DataFrame(data, columns=list('ABCD'))
+             >>> ax = df.plot.box()
+@@ -1392,6 +1394,7 @@ class PlotAccessor(PandasObject):
+         .. plot::
+             :context: close-figs
++            >>> np.random.seed(1234)
+             >>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=['one'])
+             >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
+             >>> ax = df.plot.hist(bins=12, alpha=0.5)
+@@ -1811,6 +1814,7 @@ class PlotAccessor(PandasObject):
+         .. plot::
+             :context: close-figs
++            >>> np.random.seed(1234)
+             >>> n = 10000
+             >>> df = pd.DataFrame({'x': np.random.randn(n),
+             ...                    'y': np.random.randn(n)})
+--- a/pandas/plotting/_misc.py
++++ b/pandas/plotting/_misc.py
+@@ -438,6 +438,8 @@ def bootstrap_plot(
+     .. plot::
+         :context: close-figs
++        >>> np.random.seed(1234)
++        >>> random.seed(1234)  # for reproducibility
+         >>> s = pd.Series(np.random.uniform(size=100))
+         >>> pd.plotting.bootstrap_plot(s)  # doctest: +SKIP
+         <Figure size 640x480 with 6 Axes>
+@@ -597,6 +599,7 @@ def autocorrelation_plot(series: Series,
+     .. plot::
+         :context: close-figs
++        >>> np.random.seed(1234)
+         >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)
+         >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))
+         >>> pd.plotting.autocorrelation_plot(s)  # doctest: +SKIP
+--- a/doc/source/user_guide/style.ipynb
++++ b/doc/source/user_guide/style.ipynb
+@@ -78,8 +78,37 @@
+    "source": [
+     "import pandas as pd\n",
+     "import numpy as np\n",
+-    "import matplotlib as mpl\n",
+-    "\n",
++    "import matplotlib as mpl\n"
++   ]
++  },
++  {
++   "cell_type": "code",
++   "execution_count": null,
++   "metadata": {
++    "nbsphinx": "hidden"
++   },
++   "outputs": [],
++   "source": [
++    "# For reproducibility - this doesn't respect uuid_len or positionally-passed uuid but the places here that use that coincidentally bypass this anyway\n",
++    "from pandas.io.formats.style import Styler\n",
++    "next_uuid = 1000\n",
++    "class StylerReproducible(Styler):\n",
++    "    def __init__(self, *args, uuid=None, **kwargs):\n",
++    "        global next_uuid\n",
++    "        if uuid is None:\n",
++    "            uuid = str(next_uuid)\n",
++    "            next_uuid = next_uuid + 1\n",
++    "        super().__init__(*args, uuid=uuid, **kwargs)\n",
++    "Styler = StylerReproducible\n",
++    "pd.DataFrame.style = property(lambda self: StylerReproducible(self))\n"
++   ]
++  },
++  {
++   "cell_type": "code",
++   "execution_count": null,
++   "metadata": {},
++   "outputs": [],
++   "source": [
+     "df = pd.DataFrame({\n",
+     "    \"strings\": [\"Adam\", \"Mike\"],\n",
+     "    \"ints\": [1, 3],\n",
+@@ -104,6 +133,7 @@
+    "metadata": {},
+    "outputs": [],
+    "source": [
++    "np.random.seed(25)  # for reproducibility\n",
+     "weather_df = pd.DataFrame(np.random.rand(10,2)*5, \n",
+     "                          index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
+     "                          columns=[\"Tokyo\", \"Beijing\"])\n",
+@@ -1394,7 +1424,6 @@
+    "outputs": [],
+    "source": [
+     "# Hide the construction of the display chart from the user\n",
+-    "import pandas as pd\n",
+     "from IPython.display import HTML\n",
+     "\n",
+     "# Test series\n",
+@@ -1926,6 +1955,18 @@
+    ]
+   },
+   {
++   "cell_type": "code",
++   "execution_count": null,
++   "metadata": {
++    "nbsphinx": "hidden"
++   },
++   "outputs": [],
++   "source": [
++    "# For reproducibility\n",
++    "Styler = StylerReproducible\n"
++   ]
++  },
++  {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+@@ -2126,7 +2167,8 @@
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.5"
+-  }
++  },
++  "record_timing": false
+  },
+  "nbformat": 4,
+  "nbformat_minor": 1
diff --git a/patches/hurd_compat.patch b/patches/hurd_compat.patch
new file mode 100644 (file)
index 0000000..085600d
--- /dev/null
@@ -0,0 +1,125 @@
+Description: Avoid test failures on Hurd
+
+Allow multiprocessing to be unavailable
+Accept any errno, not just 2, for (intentionally) nonexistent files
+(Hurd appears to use 2**30+2)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
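+The loosened message patterns accept any errno while still requiring
+the expected strerror text, e.g. (illustrative; 2**30+2 == 1073741826):
+
+    import re
+    pat = r"\[Errno [0-9]+\] No such file or directory"
+    assert re.search(pat, "[Errno 2] No such file or directory: 'x.csv'")
+    assert re.search(pat, "[Errno 1073741826] No such file or directory: 'x.csv'")
+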
+--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
++++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
+@@ -100,7 +100,7 @@ def test_nonexistent_path(all_parsers):
+     parser = all_parsers
+     path = f"{uuid.uuid4()}.csv"
+-    msg = r"\[Errno 2\]"
++    msg = r"\[Errno 2\]|\[Errno [0-9]+\] No such file or directory"
+     with pytest.raises(FileNotFoundError, match=msg) as e:
+         parser.read_csv(path)
+     assert path == e.value.filename
+@@ -111,7 +111,7 @@ def test_no_permission(all_parsers):
+     # GH 23784
+     parser = all_parsers
+-    msg = r"\[Errno 13\]"
++    msg = r"\[Errno 13\]|\[Errno [0-9]+\] Permission denied"
+     with tm.ensure_clean() as path:
+         os.chmod(path, 0)  # make file unreadable
+--- a/pandas/tests/io/parser/common/test_float.py
++++ b/pandas/tests/io/parser/common/test_float.py
+@@ -67,7 +67,7 @@ def test_too_many_exponent_digits(all_pa
+     data = f"data\n10E{exp}"
+     result = parser.read_csv(StringIO(data), float_precision=precision)
+     if precision == "round_trip":
+-        if exp == 999999999999999999 and is_platform_linux():
++        if exp == 999999999999999999:
+             mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
+             request.applymarker(mark)
+--- a/pandas/tests/io/parser/test_multi_thread.py
++++ b/pandas/tests/io/parser/test_multi_thread.py
+@@ -4,7 +4,13 @@ parsing files for each parser defined in
+ """
+ from contextlib import ExitStack
+ from io import BytesIO
+-from multiprocessing.pool import ThreadPool
++import pytest
++try:
++    from multiprocessing.pool import ThreadPool
++    with ThreadPool():
++        pass
++except ImportError:
++    pytest.skip("multiprocessing not available",allow_module_level=True)
+ import numpy as np
+ import pytest
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -31,6 +31,12 @@ def df():
+ def test_dask(df):
++    try:
++        from multiprocessing.pool import ThreadPool
++        with ThreadPool():
++            pass
++    except ImportError:
++        pytest.skip("multiprocessing not available")
+     # dask sets "compute.use_numexpr" to False, so catch the current value
+     # and ensure to reset it afterwards to avoid impacting other tests
+     olduse = pd.get_option("compute.use_numexpr")
+--- a/pandas/tests/io/test_common.py
++++ b/pandas/tests/io/test_common.py
+@@ -200,16 +200,16 @@ Look,a snake,🐍"""
+         path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
+         msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
+-        msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
++        msg2 = rf"\[Errno [0-9]+\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
+         msg3 = "Expected object or value"
+         msg4 = "path_or_buf needs to be a string file path or file-like"
+         msg5 = (
+-            rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
++            rf"\[Errno [0-9]+\] File .+does_not_exist\.{fn_ext} does not exist: "
+             rf"'.+does_not_exist\.{fn_ext}'"
+         )
+-        msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
++        msg6 = rf"\[Errno [0-9]+\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
+         msg7 = (
+-            rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
++            rf"\[Errno [0-9]+\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
+         )
+         msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
+@@ -270,16 +270,16 @@ Look,a snake,🐍"""
+         monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
+         msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
+-        msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
++        msg2 = rf"\[Errno [0-9]+\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
+         msg3 = "Unexpected character found when decoding 'false'"
+         msg4 = "path_or_buf needs to be a string file path or file-like"
+         msg5 = (
+-            rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
++            rf"\[Errno [0-9]+\] File .+does_not_exist\.{fn_ext} does not exist: "
+             rf"'.+does_not_exist\.{fn_ext}'"
+         )
+-        msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
++        msg6 = rf"\[Errno [0-9]+\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
+         msg7 = (
+-            rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
++            rf"\[Errno [0-9]+\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
+         )
+         msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
+@@ -610,7 +610,7 @@ def test_bad_encdoing_errors():
+ def test_errno_attribute():
+     # GH 13872
+-    with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err:
++    with pytest.raises(FileNotFoundError, match="\\[Errno [0-9]+\\]") as err:
+         pd.read_csv("doesnt_exist")
+         assert err.errno == errno.ENOENT
diff --git a/patches/ignore_ipython_exceptions.patch b/patches/ignore_ipython_exceptions.patch
new file mode 100644 (file)
index 0000000..bc7c1be
--- /dev/null
@@ -0,0 +1,18 @@
+Description: Ignore exceptions in documentation examples
+
+Some examples download data and/or depend on packages Debian doesn't
+have, so they can't run in a Debian build
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: not-needed
+
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -472,6 +472,7 @@ extlinks = {
+ }
++ipython_warning_is_error = False
+ ipython_execlines = [
+     "import numpy as np",
+     "import pandas as pd",
diff --git a/patches/ignore_python3p12_deprecations.patch b/patches/ignore_python3p12_deprecations.patch
new file mode 100644 (file)
index 0000000..fe70cfe
--- /dev/null
@@ -0,0 +1,32 @@
+Description: Ignore DeprecationWarnings from Python 3.12
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -521,6 +521,8 @@ filterwarnings = [
+   "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec",
+   # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged
+   "ignore:.*In the future `np.long` will be defined as.*:FutureWarning",
++  "ignore:Pickle, copy, and deepcopy support will be removed from itertools.*:DeprecationWarning",
++  "ignore:Bitwise inversion.*on bool.*:DeprecationWarning",
+ ]
+ junit_family = "xunit2"
+ markers = [
+--- a/pandas/tests/computation/test_eval.py
++++ b/pandas/tests/computation/test_eval.py
+@@ -569,11 +569,11 @@ class TestEval:
+         assert pd.eval("-1", parser=parser, engine=engine) == -1
+         assert pd.eval("+1", parser=parser, engine=engine) == +1
+         with tm.assert_produces_warning(
+-            warn, match="Bitwise inversion", check_stacklevel=False
++            warn, match="Bitwise inversion", check_stacklevel=False, raise_on_extra_warnings=False
+         ):
+             assert pd.eval("~True", parser=parser, engine=engine) == ~True
+         with tm.assert_produces_warning(
+-            warn, match="Bitwise inversion", check_stacklevel=False
++            warn, match="Bitwise inversion", check_stacklevel=False, raise_on_extra_warnings=False
+         ):
+             assert pd.eval("~False", parser=parser, engine=engine) == ~False
+         assert pd.eval("-True", parser=parser, engine=engine) == -True
diff --git a/patches/ignore_test_1094417.patch b/patches/ignore_test_1094417.patch
new file mode 100644 (file)
index 0000000..e29f3c9
--- /dev/null
@@ -0,0 +1,21 @@
+Description: Ignore failing test
+
+(to unblock transition, not a real fix)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1094417
+Forwarded: not-needed
+
+--- pandas-2.2.3+dfsg.orig/pandas/tests/test_downstream.py
++++ pandas-2.2.3+dfsg/pandas/tests/test_downstream.py
+@@ -235,8 +235,8 @@ def test_missing_required_dependency():
+ @pytest.mark.xfail(
+-    condition=not IS64,
+-    reason="dask has different nativesize-int vs int64 type rules",
++    condition=True,#not IS64,
++    reason="ignoring https://bugs.debian.org/1094417 to unblock transition",#"dask has different nativesize-int vs int64 type rules",
+     strict=False,
+ )
+ def test_frame_setitem_dask_array_into_new_col():
diff --git a/patches/mathjax-path.patch b/patches/mathjax-path.patch
new file mode 100644 (file)
index 0000000..f8a596c
--- /dev/null
@@ -0,0 +1,16 @@
+Description: Use Debian packaged mathjax
+
+Author: Andreas Tille <tille@debian.org>
+Forwarded: not-needed
+
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -71,6 +71,8 @@ extensions = [
+     "nbsphinx",
+ ]
++mathjax_path="MathJax.js"
++
+ exclude_patterns = [
+     "**.ipynb_checkpoints",
+     # to ensure that include files (partial pages) aren't built, exclude them
diff --git a/patches/mips_pow_nan.patch b/patches/mips_pow_nan.patch
new file mode 100644 (file)
index 0000000..86599ce
--- /dev/null
@@ -0,0 +1,85 @@
+Description: On mips, 1**np.nan and np.nan**0 may be NaN not 1
+
+Done this way rather than as a plain xfail so that only this specific
+difference is tolerated, not clearly wrong answers
+
+(The same hardware's "invalid value encountered" warnings,
+probably from sNaN/qNaN being reversed, are ignored elsewhere:
+d/rules and xfail_tests_nonintel_io.patch)
+
+https://en.wikipedia.org/wiki/NaN#Encoding
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
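+For reference, the results being relaxed are the IEEE 754 pow special
+cases (the mips values are an assumption from observed test failures):
+
+    import numpy as np
+    np.float64(1.0) ** np.nan  # 1.0 per IEEE 754 pow; may be nan on mips
+    np.nan ** np.float64(0.0)  # 1.0 per IEEE 754 pow; may be nan on mips
+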
+--- a/pandas/tests/arrays/floating/test_arithmetic.py
++++ b/pandas/tests/arrays/floating/test_arithmetic.py
+@@ -1,4 +1,5 @@
+ import operator
++import platform
+ import numpy as np
+ import pytest
+@@ -69,6 +70,11 @@ def test_pow_scalar(dtype):
+         np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype),
+         mask=a._mask,
+     )
++    if 'mips' in platform.uname()[4] and np.isnan(result[2]):
++        expected = FloatingArray(
++            np.array([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype),
++            mask=a._mask,
++        )
+     tm.assert_extension_array_equal(result, expected)
+     # reversed
+@@ -80,6 +86,12 @@ def test_pow_scalar(dtype):
+     result = 1**a
+     expected = pd.array([1, 1, 1, 1], dtype=dtype)
++    if 'mips' in platform.uname()[4] and np.isnan(result[2]):
++        expected = FloatingArray(
++            np.array([1, 1, np.nan, 1], dtype=dtype.numpy_dtype),
++            mask=expected._mask,
++        )
++
+     tm.assert_extension_array_equal(result, expected)
+     result = pd.NA**a
+@@ -90,6 +102,11 @@ def test_pow_scalar(dtype):
+     expected = FloatingArray(
+         np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask
+     )
++    if 'mips' in platform.uname()[4] and np.isnan(result[0]):
++        expected = FloatingArray(
++            np.array([np.nan, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype),
++            mask=a._mask,
++        )
+     tm.assert_extension_array_equal(result, expected)
+@@ -98,6 +115,8 @@ def test_pow_array(dtype):
+     b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype)
+     result = a**b
+     expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype)
++    if 'mips' in platform.uname()[4] and np.isnan(result[5]):
++        expected = FloatingArray(np.array([1, 0, np.nan, 1, 1, np.nan, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=expected._mask)
+     tm.assert_extension_array_equal(result, expected)
+--- a/pandas/tests/arrays/sparse/test_arithmetics.py
++++ b/pandas/tests/arrays/sparse/test_arithmetics.py
+@@ -1,4 +1,5 @@
+ import operator
++import platform
+ import numpy as np
+ import pytest
+@@ -44,6 +45,8 @@ class TestSparseArrayArithmetics:
+                 result = op(a, b_dense).to_dense()
+             else:
+                 result = op(a, b).to_dense()
++        if 'mips' in platform.uname()[4] and op==operator.pow and a[1]==1 and np.isnan(b if np.isscalar(b) else b[1]) and np.isnan(expected[1]) and result[1]==1:
++            expected[1]=1
+         self._assert(result, expected)
diff --git a/patches/no_pkg_resources.patch b/patches/no_pkg_resources.patch
new file mode 100644 (file)
index 0000000..52a9eff
--- /dev/null
@@ -0,0 +1,25 @@
+Description: Stop using pkg_resources
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/1083523
+Forwarded: no, it would crash on Macs
+
+--- a/setup.py
++++ b/setup.py
+@@ -16,7 +16,6 @@ import sys
+ from sysconfig import get_config_vars
+ import numpy
+-from pkg_resources import parse_version
+ from setuptools import (
+     Command,
+     Extension,
+@@ -46,7 +45,7 @@ try:
+     )
+     from Cython.Build import cythonize
+-    _CYTHON_INSTALLED = parse_version(_CYTHON_VERSION) >= parse_version(min_cython_ver)
++    _CYTHON_INSTALLED = True
+ except ImportError:
+     _CYTHON_VERSION = None
+     _CYTHON_INSTALLED = False
diff --git a/patches/no_pytz_datetime.patch b/patches/no_pytz_datetime.patch
new file mode 100644 (file)
index 0000000..fc364e6
--- /dev/null
@@ -0,0 +1,45 @@
+Description: datetime does not work with non-constant pytz.timezone
+
+This has always been the case (and is explicitly warned about
+in the pytz documentation), but became a test failure when
+tzdata 2024b changed 'CET' and similar to aliases.
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
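+A minimal illustration of the pytz pitfall (the offsets shown are for
+Europe/Brussels and are approximate):
+
+    import datetime
+    import pytz
+    tz = pytz.timezone("Europe/Brussels")
+    # Wrong: the constructor attaches the zone's first historical
+    # offset (LMT), not the offset valid on that date:
+    datetime.datetime(2000, 1, 1, tzinfo=tz)   # UTC+00:18 (LMT)
+    # Right: localize() resolves the offset for the given date:
+    tz.localize(datetime.datetime(2000, 1, 1)) # UTC+01:00 (CET)
+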
+--- a/pandas/tests/arrays/test_array.py
++++ b/pandas/tests/arrays/test_array.py
+@@ -272,7 +272,8 @@ def test_array_copy():
+     assert tm.shares_memory(a, b)
+-cet = pytz.timezone("CET")
++cetfixed = pytz.timezone("Etc/GMT-1") # the wrong-looking sign is because Etc/* use posix convention, as described in the tzdata source
++cetwithdst = pytz.timezone("Europe/Brussels")
+ @pytest.mark.parametrize(
+@@ -313,11 +314,20 @@ cet = pytz.timezone("CET")
+         ),
+         (
+             [
+-                datetime.datetime(2000, 1, 1, tzinfo=cet),
+-                datetime.datetime(2001, 1, 1, tzinfo=cet),
++                datetime.datetime(2000, 1, 1, tzinfo=cetfixed),
++                datetime.datetime(2001, 1, 1, tzinfo=cetfixed),
+             ],
+             DatetimeArray._from_sequence(
+-                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet, unit="ns")
++                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cetfixed, unit="ns")
++            ),
++        ),
++        (
++            [
++                cetwithdst.localize(datetime.datetime(2000, 1, 1)),
++                cetwithdst.localize(datetime.datetime(2001, 1, 1)),
++            ],
++            DatetimeArray._from_sequence(
++                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cetwithdst, unit="ns")
+             ),
+         ),
+         # timedelta
diff --git a/patches/numba_fail_32bit.patch b/patches/numba_fail_32bit.patch
new file mode 100644 (file)
index 0000000..3a31e61
--- /dev/null
@@ -0,0 +1,167 @@
+Description: Allow some numba errors on 32-bit
+
+Specifying the exception type ensures that only explicit errors are
+tolerated, not silently wrong answers
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
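+parfors is numba's automatic parallelization layer, requested via
+numba.jit(parallel=True); compiling such a function on a 32-bit target
+raises UnsupportedParforsError.  A sketch of the failure mode:
+
+    import numba
+
+    @numba.jit(nopython=True, parallel=True)
+    def f(x):
+        return x + 1
+
+    f(1)  # compiles here; raises UnsupportedParforsError on 32-bit
+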
+--- a/pandas/tests/groupby/conftest.py
++++ b/pandas/tests/groupby/conftest.py
+@@ -11,6 +11,11 @@ from pandas.core.groupby.base import (
+     reduction_kernels,
+     transformation_kernels,
+ )
++from pandas.compat import IS64
++try:
++    from numba.core.errors import UnsupportedParforsError
++except ImportError:  # numba not installed
++    UnsupportedParforsError = ImportError
+ @pytest.fixture(params=[True, False])
+@@ -153,7 +158,22 @@ def groupby_func(request):
+     return request.param
+-@pytest.fixture(params=[True, False])
++# the xfail is because numba does not support this on 32-bit systems
++# https://github.com/numba/numba/blob/main/numba/parfors/parfors.py
++# strict=False because some tests are of error paths that
++# fail of something else before reaching this point
++@pytest.fixture(params=[
++                    pytest.param(
++                        True,
++                        marks=pytest.mark.xfail(
++                            condition=not IS64,
++                            reason="parfors not available on 32-bit",
++                            raises=UnsupportedParforsError,
++                            strict=False,
++                        )
++                    ),
++                    False,
++                ])
+ def parallel(request):
+     """parallel keyword argument for numba.jit"""
+     return request.param
+--- a/pandas/tests/window/conftest.py
++++ b/pandas/tests/window/conftest.py
+@@ -13,6 +13,12 @@ from pandas import (
+     Series,
+     bdate_range,
+ )
++from pandas.compat import IS64
++try:
++    from numba.core.errors import UnsupportedParforsError, TypingError
++except ImportError:  # numba not installed
++    UnsupportedParforsError = ImportError
++    TypingError = ImportError
+ @pytest.fixture(params=[True, False])
+@@ -50,7 +56,22 @@ def min_periods(request):
+     return request.param
+-@pytest.fixture(params=[True, False])
++# the xfail is because numba does not support this on 32-bit systems
++# https://github.com/numba/numba/blob/main/numba/parfors/parfors.py
++# strict=False because some tests are of error paths that
++# fail for some other reason before reaching this point
++@pytest.fixture(params=[
++                    pytest.param(
++                        True,
++                        marks=pytest.mark.xfail(
++                            condition=not IS64,
++                            reason="parfors not available on 32-bit",
++                            raises=(UnsupportedParforsError, TypingError),
++                            strict=False,
++                        )
++                    ),
++                    False,
++                ])
+ def parallel(request):
+     """parallel keyword argument for numba.jit"""
+     return request.param
+--- a/pandas/tests/window/test_numba.py
++++ b/pandas/tests/window/test_numba.py
+@@ -1,6 +1,12 @@
+ import numpy as np
+ import pytest
++from pandas.compat import IS64
++try:
++    from numba.core.errors import UnsupportedParforsError, TypingError
++except ImportError:  # numba not installed
++    UnsupportedParforsError = ImportError
++    TypingError = ImportError
+ from pandas.errors import NumbaUtilError
+ import pandas.util._test_decorators as td
+@@ -186,6 +192,12 @@ class TestEngine:
+         expected = DataFrame({"value": [2.0, 2.0, 2.0]})
+         tm.assert_frame_equal(result, expected)
++    @pytest.mark.xfail(
++        condition=not IS64,
++        reason="parfors not available on 32-bit",
++        raises=UnsupportedParforsError,
++        strict=False,
++    )
+     def test_dont_cache_engine_kwargs(self):
+         # If the user passes a different set of engine_kwargs don't return the same
+         # jitted function
+@@ -326,6 +338,12 @@ class TestTableMethod:
+                 f, engine="numba", raw=True
+             )
++    @pytest.mark.xfail(
++        condition=not IS64,
++        reason="parfors not available on 32-bit",
++        raises=(UnsupportedParforsError, TypingError),
++        strict=False,
++    )
+     def test_table_method_rolling_methods(
+         self,
+         axis,
+@@ -408,6 +426,12 @@ class TestTableMethod:
+         )
+         tm.assert_frame_equal(result, expected)
++    @pytest.mark.xfail(
++        condition=not IS64,
++        reason="parfors not available on 32-bit",
++        raises=(UnsupportedParforsError, TypingError),
++        strict=False,
++    )
+     def test_table_method_expanding_methods(
+         self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
+     ):
+--- a/pandas/tests/groupby/aggregate/test_numba.py
++++ b/pandas/tests/groupby/aggregate/test_numba.py
+@@ -11,6 +11,12 @@ from pandas import (
+     option_context,
+ )
+ import pandas._testing as tm
++from pandas.compat import IS64
++try:
++    from numba.core.errors import UnsupportedParforsError, TypingError
++except ImportError:  # numba not installed
++    UnsupportedParforsError = ImportError
++    TypingError = ImportError
+ pytestmark = pytest.mark.single_cpu
+@@ -252,6 +258,12 @@ def test_multifunc_numba_vs_cython_serie
+         ),
+     ],
+ )
++@pytest.mark.xfail(
++    condition=not IS64,
++    reason="parfors not available on 32-bit",
++    raises=UnsupportedParforsError,
++    strict=False,
++)
+ def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
+     pytest.importorskip("numba")
+     labels = ["a", "a", "b", "b", "a"]
diff --git a/patches/numba_warn_nonx86.patch b/patches/numba_warn_nonx86.patch
new file mode 100644 (file)
index 0000000..e911823
--- /dev/null
@@ -0,0 +1,30 @@
+Description: Warn that numba may not work on non-x86
+
+Currently known issues are crashes, not wrong answers, but because
+the test setup cannot ignore crashes while still failing on wrong
+answers, it would be easy not to notice if this changed
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/compat/_optional.py
++++ b/pandas/compat/_optional.py
+@@ -4,6 +4,9 @@ import importlib
+ import sys
+ from typing import TYPE_CHECKING
+ import warnings
++import platform
++import re
++warn_numba_platform = "Non-x86 system detected, Numba may give wrong results or crash" if not bool(re.match('i.?86|x86',platform.uname()[4])) else False
+ from pandas.util._exceptions import find_stack_level
+@@ -123,6 +126,8 @@ def import_optional_dependency(
+         is ``'warn'`` or ``'ignore'``.
+     """
+     assert errors in {"warn", "raise", "ignore"}
++    if name=='numba' and warn_numba_platform:
++        warnings.warn(warn_numba_platform)
+     package_name = INSTALL_MAPPING.get(name)
+     install_name = package_name if package_name is not None else name
diff --git a/patches/privacy.patch b/patches/privacy.patch
new file mode 100644 (file)
index 0000000..93b5039
--- /dev/null
@@ -0,0 +1,23 @@
+Description: Link to rather than embed Google calendar
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: not-needed
+
+--- a/doc/source/development/community.rst
++++ b/doc/source/development/community.rst
+@@ -48,14 +48,10 @@ The agenda for the next meeting and minu
+ Calendar
+ --------
+-This calendar shows all the community meetings. Our community meetings are
++`This calendar <https://calendar.google.com/calendar/embed?src=pgbn14p6poja8a1cf2dv2jhrmg%40group.calendar.google.com>`__ shows all the community meetings. Our community meetings are
+ ideal for anyone wanting to contribute to pandas, or just curious to know how
+ current development is going.
+-.. raw:: html
+-
+-   <iframe src="https://calendar.google.com/calendar/embed?src=pgbn14p6poja8a1cf2dv2jhrmg%40group.calendar.google.com" style="border: 0" width="800" height="600" frameborder="0" scrolling="no"></iframe>
+-
+ You can subscribe to this calendar with the following links:
+ * `iCal <https://calendar.google.com/calendar/ical/pgbn14p6poja8a1cf2dv2jhrmg%40group.calendar.google.com/public/basic.ics>`__
diff --git a/patches/privacy2.patch b/patches/privacy2.patch
new file mode 100644 (file)
index 0000000..d5d90e3
--- /dev/null
@@ -0,0 +1,16 @@
+Description: Use local logo file
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -245,7 +245,7 @@ html_theme_options = {
+     "footer_start": ["pandas_footer", "sphinx-version"],
+     "github_url": "https://github.com/pandas-dev/pandas",
+     "twitter_url": "https://twitter.com/pandas_dev",
+-    "logo": {"image_dark": "https://pandas.pydata.org/static/img/pandas_white.svg"},
++    "logo": {"image_dark": "../../web/pandas/static/img/pandas_white.svg"},
+     "navbar_align": "left",
+     "navbar_end": ["version-switcher", "theme-switcher", "navbar-icon-links"],
+     "switcher": {
diff --git a/patches/pytables_python3p12.patch b/patches/pytables_python3p12.patch
new file mode 100644 (file)
index 0000000..f2eadde
--- /dev/null
@@ -0,0 +1,54 @@
+Description: Ignore pytables test failures with Python 3.12
+
+The xfails are combined because, when two separate xfails apply and
+only one has run=False, the test may still run.  That is a problem when
+the run=False exists to avoid a crash (run=False stops the test from
+executing at all) - see the 1.5.3+dfsg-8 armhf build log
+
+Bug-Debian: https://bugs.debian.org/1055801
+Bug-Ubuntu: https://launchpad.net/ubuntu/+bug/2043895
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/tests/io/pytables/test_append.py
++++ b/pandas/tests/io/pytables/test_append.py
+@@ -286,7 +286,8 @@ def test_append_all_nans(setup_path):
+             tm.assert_frame_equal(store["df2"], df, check_index_type=True)
+-@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
++from pandas.compat import PY312
++@pytest.mark.xfail(condition=PY312 or is_crashing_arch, reason="https://bugs.debian.org/1055801 and https://bugs.debian.org/790925",raises=ValueError,strict=False, run=not is_crashing_arch)
+ def test_append_frame_column_oriented(setup_path):
+     with ensure_clean_store(setup_path) as store:
+         # column oriented
+--- a/pandas/tests/io/pytables/test_select.py
++++ b/pandas/tests/io/pytables/test_select.py
+@@ -168,6 +168,8 @@ def test_select(setup_path):
+         tm.assert_frame_equal(expected, result)
++from pandas.compat import PY312
++@pytest.mark.xfail(condition=PY312, reason="python3.12 https://bugs.debian.org/1055801",raises=ValueError,strict=False)
+ def test_select_dtypes(setup_path):
+     with ensure_clean_store(setup_path) as store:
+         # with a Timestamp data column (GH #2637)
+@@ -607,6 +609,7 @@ def test_select_iterator_many_empty_fram
+         assert len(results) == 0
++@pytest.mark.xfail(condition=PY312, reason="python3.12 https://bugs.debian.org/1055801",raises=TypeError,strict=False)
+ def test_frame_select(setup_path):
+     df = DataFrame(
+         np.random.default_rng(2).standard_normal((10, 4)),
+--- a/pandas/tests/io/pytables/test_store.py
++++ b/pandas/tests/io/pytables/test_store.py
+@@ -884,7 +884,8 @@ def test_start_stop_fixed(setup_path):
+         df.iloc[8:10, -2] = np.nan
+-@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
++from pandas.compat import PY312
++@pytest.mark.xfail(condition=PY312 or is_crashing_arch, reason="https://bugs.debian.org/1055801 and https://bugs.debian.org/790925",raises=ValueError,strict=False, run=not is_crashing_arch)
+ def test_select_filter_corner(setup_path):
+     df = DataFrame(np.random.default_rng(2).standard_normal((50, 100)))
+     df.index = [f"{c:3d}" for c in df.index]
diff --git a/patches/remove_ccbysa_snippets.patch b/patches/remove_ccbysa_snippets.patch
new file mode 100644 (file)
index 0000000..7ac89f9
--- /dev/null
@@ -0,0 +1,229 @@
+Description: Remove code from Stack Overflow
+
+Stack Overflow content is CC-BY-SA licensed,
+which this package is not supposed to be.  These snippets may be
+too small to be copyrightable, but we remove them to be safe.
+
+https://lists.debian.org/debian-legal/2020/04/threads.html#00018
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no - deletes some tests/examples without replacement
+
+--- /dev/null
++++ b/doc/source/user_guide/cookbook.rst
+@@ -0,0 +1,22 @@
++.. _cookbook:
++
++{{ header }}
++
++.. _cookbook.idioms:
++.. _cookbook.selection:
++.. _cookbook.multi_index:
++.. _cookbook.missing_data:
++.. _cookbook.grouping:
++.. _cookbook.pivot:
++.. _cookbook.resample:
++.. _cookbook.merge:
++.. _cookbook.plotting:
++.. _cookbook.csv:
++.. _cookbook.csv.multiple_files:
++.. _cookbook.sql:
++.. _cookbook.excel:
++.. _cookbook.html:
++.. _cookbook.hdf:
++.. _cookbook.binary:
++
++This page has been removed for copyright reasons.
+--- a/doc/source/user_guide/index.rst
++++ b/doc/source/user_guide/index.rst
+@@ -87,4 +87,3 @@ Guides
+     scale
+     sparse
+     gotchas
+-    cookbook
+--- a/pandas/io/sql.py
++++ b/pandas/io/sql.py
+@@ -2465,14 +2465,14 @@ def _get_valid_sqlite_name(name: object)
+     # Replace all " with "".
+     # Wrap the entire thing in double quotes.
+-    uname = _get_unicode_name(name)
+-    if not len(uname):
++    name = _get_unicode_name(name)
++    if not len(name):
+         raise ValueError("Empty table or column name specified")
+-    nul_index = uname.find("\x00")
+-    if nul_index >= 0:
++    if '\0' in name:
+         raise ValueError("SQLite identifier cannot contain NULs")
+-    return '"' + uname.replace('"', '""') + '"'
++    name = name.replace('"', '""')
++    return '"' + name + '"'
+ class SQLiteTable(SQLTable):
+--- a/pandas/tests/groupby/test_categorical.py
++++ b/pandas/tests/groupby/test_categorical.py
+@@ -988,28 +988,6 @@ def test_groupby_empty_with_category():
+     tm.assert_series_equal(result, expected)
+-def test_sort():
+-    # https://stackoverflow.com/questions/23814368/sorting-pandas-
+-    #        categorical-labels-after-groupby
+-    # This should result in a properly sorted Series so that the plot
+-    # has a sorted x axis
+-    # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
+-
+-    df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)})
+-    labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
+-    cat_labels = Categorical(labels, labels)
+-
+-    df = df.sort_values(by=["value"], ascending=True)
+-    df["value_group"] = pd.cut(
+-        df.value, range(0, 10500, 500), right=False, labels=cat_labels
+-    )
+-
+-    res = df.groupby(["value_group"], observed=False)["value_group"].count()
+-    exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
+-    exp.index = CategoricalIndex(exp.index, name=exp.index.name)
+-    tm.assert_series_equal(res, exp)
+-
+-
+ @pytest.mark.parametrize("ordered", [True, False])
+ def test_sort2(sort, ordered):
+     # dataframe groupby sort was being ignored # GH 8868
+--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
++++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+@@ -13,35 +13,6 @@ from pandas import (
+ import pandas._testing as tm
+-def test_detect_chained_assignment(using_copy_on_write, warn_copy_on_write):
+-    # Inplace ops, originally from:
+-    # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
+-    a = [12, 23]
+-    b = [123, None]
+-    c = [1234, 2345]
+-    d = [12345, 23456]
+-    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
+-    events = {
+-        ("eyes", "left"): a,
+-        ("eyes", "right"): b,
+-        ("ears", "left"): c,
+-        ("ears", "right"): d,
+-    }
+-    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
+-    zed = DataFrame(events, index=["a", "b"], columns=multiind)
+-
+-    if using_copy_on_write:
+-        with tm.raises_chained_assignment_error():
+-            zed["eyes"]["right"].fillna(value=555, inplace=True)
+-    elif warn_copy_on_write:
+-        with tm.assert_produces_warning(None):
+-            zed["eyes"]["right"].fillna(value=555, inplace=True)
+-    else:
+-        msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+-        with pytest.raises(SettingWithCopyError, match=msg):
+-            with tm.assert_produces_warning(None):
+-                zed["eyes"]["right"].fillna(value=555, inplace=True)
+-
+ @td.skip_array_manager_invalid_test  # with ArrayManager df.loc[0] is not a view
+ def test_cache_updating(using_copy_on_write, warn_copy_on_write):
+--- a/pandas/tests/indexing/multiindex/test_setitem.py
++++ b/pandas/tests/indexing/multiindex/test_setitem.py
+@@ -154,36 +154,7 @@ class TestMultiIndexSetItem:
+         with pytest.raises(TypeError, match=msg):
+             df.loc["bar"] *= 2
+-    def test_multiindex_setitem2(self):
+-        # from SO
+-        # https://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
+-        df_orig = DataFrame.from_dict(
+-            {
+-                "price": {
+-                    ("DE", "Coal", "Stock"): 2,
+-                    ("DE", "Gas", "Stock"): 4,
+-                    ("DE", "Elec", "Demand"): 1,
+-                    ("FR", "Gas", "Stock"): 5,
+-                    ("FR", "Solar", "SupIm"): 0,
+-                    ("FR", "Wind", "SupIm"): 0,
+-                }
+-            }
+-        )
+-        df_orig.index = MultiIndex.from_tuples(
+-            df_orig.index, names=["Sit", "Com", "Type"]
+-        )
+-        expected = df_orig.copy()
+-        expected.iloc[[0, 1, 3]] *= 2
+-
+-        idx = pd.IndexSlice
+-        df = df_orig.copy()
+-        df.loc[idx[:, :, "Stock"], :] *= 2
+-        tm.assert_frame_equal(df, expected)
+-
+-        df = df_orig.copy()
+-        df.loc[idx[:, :, "Stock"], "price"] *= 2
+-        tm.assert_frame_equal(df, expected)
+     def test_multiindex_assignment(self):
+         # GH3777 part 2
+--- a/pandas/tests/indexing/test_chaining_and_caching.py
++++ b/pandas/tests/indexing/test_chaining_and_caching.py
+@@ -429,27 +429,6 @@ class TestChaining:
+         df["column1"] = df["column1"] + "c"
+         str(df)
+-    @pytest.mark.arm_slow
+-    def test_detect_chained_assignment_undefined_column(
+-        self, using_copy_on_write, warn_copy_on_write
+-    ):
+-        # from SO:
+-        # https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
+-        df = DataFrame(np.arange(0, 9), columns=["count"])
+-        df["group"] = "b"
+-        df_original = df.copy()
+-
+-        if using_copy_on_write:
+-            with tm.raises_chained_assignment_error():
+-                df.iloc[0:5]["group"] = "a"
+-            tm.assert_frame_equal(df, df_original)
+-        elif warn_copy_on_write:
+-            with tm.raises_chained_assignment_error():
+-                df.iloc[0:5]["group"] = "a"
+-        else:
+-            with pytest.raises(SettingWithCopyError, match=msg):
+-                with tm.raises_chained_assignment_error():
+-                    df.iloc[0:5]["group"] = "a"
+     @pytest.mark.arm_slow
+     def test_detect_chained_assignment_changing_dtype(
+--- a/pandas/tests/io/parser/common/test_common_basic.py
++++ b/pandas/tests/io/parser/common/test_common_basic.py
+@@ -381,23 +381,6 @@ def test_trailing_delimiters(all_parsers
+     tm.assert_frame_equal(result, expected)
+-def test_escapechar(all_parsers):
+-    # https://stackoverflow.com/questions/13824840/feature-request-for-
+-    # pandas-read-csv
+-    data = '''SEARCH_TERM,ACTUAL_URL
+-"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+-"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+-"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
+-
+-    parser = all_parsers
+-    result = parser.read_csv(
+-        StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
+-    )
+-
+-    assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
+-
+-    tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
+-
+ def test_ignore_leading_whitespace(all_parsers):
+     # see gh-3374, gh-6607
diff --git a/patches/series b/patches/series
new file mode 100644 (file)
index 0000000..37ebe12
--- /dev/null
@@ -0,0 +1,42 @@
+deb_nonversioneer_version.patch
+deb_doc_donotoverride_PYTHONPATH.patch
+xfail_tests_nonintel_io.patch
+deb_disable_analytics.patch
+mathjax-path.patch
+use_system_intersphinx.patch
+contributor_list_not_in_tarball.patch
+fix_random_seeds.patch
+privacy.patch
+find_test_data.patch
+remove_ccbysa_snippets.patch
+numba_fail_32bit.patch
+hurd_compat.patch
+skip_test_missing_required_dependency.patch
+fix_overly_arch_specific_xfails.patch
+tests_dont_assume_64bit.patch
+armel_ignore_nonwarning.patch
+1029251_ignore_rounding_error.patch
+tests_dont_assume_endian.patch
+accept_system_tzdata.patch
+numba_warn_nonx86.patch
+mips_pow_nan.patch
+allow_no_matplotlib.patch
+unbreak_clean.patch
+ignore_ipython_exceptions.patch
+xarray_version_workaround.patch
+allow_no_openpyxl.patch
+2p1_openpyxl_errors.patch
+pytables_python3p12.patch
+ignore_python3p12_deprecations.patch
+sum_loosen_test_tolerance.patch
+1068104_time64.patch
+1068422_ignore_dask_tests.patch
+versioned_importorskip.patch
+add_missing_importorskip.patch
+blosc_nonstrict_xfail.patch
+no_pkg_resources.patch
+no_pytz_datetime.patch
+1088988_xarray_pyreadstat_compat.patch
+privacy2.patch
+value_counts_nat_numpy2.patch
+ignore_test_1094417.patch
diff --git a/patches/skip_test_missing_required_dependency.patch b/patches/skip_test_missing_required_dependency.patch
new file mode 100644 (file)
index 0000000..ca9ae07
--- /dev/null
@@ -0,0 +1,15 @@
+Description: Skip test that fails from outside the source tree
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: not-needed
+
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -193,6 +193,7 @@ def test_yaml_dump(df):
+ @pytest.mark.single_cpu
++@pytest.mark.skip(reason="Fails in an installed package as it can't find pandas")
+ def test_missing_required_dependency():
+     # GH 23868
+     # To ensure proper isolation, we pass these flags
diff --git a/patches/sum_loosen_test_tolerance.patch b/patches/sum_loosen_test_tolerance.patch
new file mode 100644 (file)
index 0000000..22effec
--- /dev/null
@@ -0,0 +1,19 @@
+Description: Don't fail sum test on near-cancelling inputs
+
+The test failed in 1.5.3+dfsg-11 after it happened to select an input that
+cancelled to ~1e-5 (on i386, so x87 excess precision might be involved)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
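+Why atol is needed: with rtol alone the allowed error shrinks with the
+expected value, leaving almost no slack when a sum nearly cancels
+(illustrative arithmetic):
+
+    # allowed error = atol + rtol * |expected|
+    # expected ~ 1e-5, rtol = 1e-3, atol = 0    ->  allowed ~ 1e-8
+    # expected ~ 1e-5, rtol = 1e-3, atol = 1e-3 ->  allowed ~ 1e-3
+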
+--- a/pandas/tests/frame/test_reductions.py
++++ b/pandas/tests/frame/test_reductions.py
+@@ -344,7 +344,7 @@ class TestDataFrameAnalytics:
+             np.sum,
+             mixed_float_frame.astype("float32"),
+             check_dtype=False,
+-            rtol=1e-3,
++            rtol=1e-3, atol=1e-3,
+         )
+         assert_stat_op_calc(
diff --git a/patches/tests_dont_assume_64bit.patch b/patches/tests_dont_assume_64bit.patch
new file mode 100644 (file)
index 0000000..a5849c2
--- /dev/null
@@ -0,0 +1,136 @@
+Description: Fix test failures on 32-bit systems
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: partly https://bugs.debian.org/1026351
+Forwarded: no
+
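+pandas.compat.IS64, used below to mark the affected tests, is in
+essence a pointer-width check (sketch):
+
+    import sys
+    IS64 = sys.maxsize > 2**32
+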
+--- a/pandas/tests/groupby/test_groupby.py
++++ b/pandas/tests/groupby/test_groupby.py
+@@ -6,6 +6,7 @@ import re
+ import numpy as np
+ import pytest
++from pandas.compat import IS64
+ from pandas.errors import (
+     PerformanceWarning,
+     SpecificationError,
+@@ -2618,6 +2619,7 @@ def test_groupby_series_with_tuple_name(
+     tm.assert_series_equal(result, expected)
++@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system", strict=False)
+ @pytest.mark.parametrize(
+     "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
+ )
+@@ -2630,6 +2632,7 @@ def test_groupby_numerical_stability_sum
+     tm.assert_frame_equal(result, expected)
++@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system", strict=False)
+ def test_groupby_numerical_stability_cumsum():
+     # GH#38934
+     data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+--- a/pandas/tests/test_sorting.py
++++ b/pandas/tests/test_sorting.py
+@@ -4,6 +4,7 @@ from itertools import product
+ import numpy as np
+ import pytest
++from pandas.compat import IS64
+ from pandas import (
+     NA,
+@@ -218,6 +219,7 @@ class TestMerge:
+         assert result.name is None
+     @pytest.mark.slow
++    @pytest.mark.xfail(condition=not IS64, reason="assumes default int is int64")
+     @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"])
+     def test_int64_overflow_how_merge(self, left_right, how):
+         left, right = left_right
+@@ -228,6 +230,7 @@ class TestMerge:
+         tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))
+     @pytest.mark.slow
++    @pytest.mark.xfail(condition=not IS64, reason="assumes default int is int64")
+     def test_int64_overflow_sort_false_order(self, left_right):
+         left, right = left_right
+@@ -239,6 +242,7 @@ class TestMerge:
+         tm.assert_frame_equal(right, out[right.columns.tolist()])
+     @pytest.mark.slow
++    @pytest.mark.xfail(condition=not IS64, reason="assumes default int is int64", strict=False)
+     @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"])
+     @pytest.mark.parametrize("sort", [True, False])
+     def test_int64_overflow_one_to_many_none_match(self, how, sort):
+--- a/pandas/tests/frame/test_stack_unstack.py
++++ b/pandas/tests/frame/test_stack_unstack.py
+@@ -20,6 +20,7 @@ from pandas import (
+ )
+ import pandas._testing as tm
+ from pandas.core.reshape import reshape as reshape_lib
++from pandas.compat import IS64
+ @pytest.fixture(params=[True, False])
+@@ -2175,6 +2176,7 @@ class TestStackUnstackMultiLevel:
+         tm.assert_frame_equal(recons, df)
+     @pytest.mark.slow
++    @pytest.mark.xfail(condition=not IS64, reason="assumes default int is int64")
+     def test_unstack_number_of_levels_larger_than_int32(self, monkeypatch):
+         # GH#20601
+         # GH 26314: Change ValueError to PerformanceWarning
+--- a/pandas/tests/reshape/test_pivot.py
++++ b/pandas/tests/reshape/test_pivot.py
+@@ -28,6 +28,7 @@ import pandas._testing as tm
+ from pandas.api.types import CategoricalDtype
+ from pandas.core.reshape import reshape as reshape_lib
+ from pandas.core.reshape.pivot import pivot_table
++from pandas.compat import IS64
+ @pytest.fixture(params=[True, False])
+@@ -2092,6 +2093,7 @@ class TestPivotTable:
+         tm.assert_frame_equal(result, expected)
+     @pytest.mark.slow
++    @pytest.mark.xfail(condition=not IS64, reason="assumes default int is int64")
+     def test_pivot_number_of_levels_larger_than_int32(self, monkeypatch):
+         # GH 20601
+         # GH 26314: Change ValueError to PerformanceWarning
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -19,6 +19,7 @@ from pandas import (
+     TimedeltaIndex,
+ )
+ import pandas._testing as tm
++from pandas.compat import IS64
+ from pandas.core.arrays import (
+     DatetimeArray,
+     TimedeltaArray,
+@@ -230,6 +231,11 @@ def test_missing_required_dependency():
+         assert name in output
++@pytest.mark.xfail(
++    condition=not IS64,
++    reason="dask has different nativesize-int vs int64 type rules",
++    strict=False,
++)
+ def test_frame_setitem_dask_array_into_new_col():
+     # GH#47128
+--- a/pandas/tests/series/methods/test_round.py
++++ b/pandas/tests/series/methods/test_round.py
+@@ -30,8 +30,7 @@ class TestSeriesRound:
+     def test_round_numpy_with_nan(self, any_float_dtype):
+         # See GH#14197
+         ser = Series([1.53, np.nan, 0.06], dtype=any_float_dtype)
+-        with tm.assert_produces_warning(None):
+-            result = ser.round()
++        result = ser.round() # on armhf, numpy warns
+         expected = Series([2.0, np.nan, 0.0], dtype=any_float_dtype)
+         tm.assert_series_equal(result, expected)
diff --git a/patches/tests_dont_assume_endian.patch b/patches/tests_dont_assume_endian.patch
new file mode 100644 (file)
index 0000000..fc770ca
--- /dev/null
@@ -0,0 +1,31 @@
+Description: Don't assume little-endian in test references
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
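+NumPy dtype strings carry the byte order in their first character, so
+the expected strings must vary by platform (illustrative):
+
+    import numpy as np
+    np.dtype("<M8[ns]")  # little-endian datetime64[ns]
+    np.dtype(">M8[ns]")  # big-endian datetime64[ns]
+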
+--- a/pandas/tests/indexes/interval/test_constructors.py
++++ b/pandas/tests/indexes/interval/test_constructors.py
+@@ -23,7 +23,7 @@ from pandas import (
+ import pandas._testing as tm
+ from pandas.core.arrays import IntervalArray
+ import pandas.core.common as com
+-
++from pandas.compat import is_platform_little_endian
+ @pytest.fixture(params=[None, "foo"])
+ def name(request):
+@@ -44,12 +44,12 @@ class ConstructorTests:
+             (Index(np.arange(-10, 11, dtype=np.int64)), np.int64),
+             (Index(np.arange(10, 31, dtype=np.uint64)), np.uint64),
+             (Index(np.arange(20, 30, 0.5), dtype=np.float64), np.float64),
+-            (date_range("20180101", periods=10), "<M8[ns]"),
++            (date_range("20180101", periods=10), "<M8[ns]" if is_platform_little_endian() else ">M8[ns]"),
+             (
+                 date_range("20180101", periods=10, tz="US/Eastern"),
+                 "datetime64[ns, US/Eastern]",
+             ),
+-            (timedelta_range("1 day", periods=10), "<m8[ns]"),
++            (timedelta_range("1 day", periods=10), "<m8[ns]" if is_platform_little_endian() else ">m8[ns]"),
+         ]
+     )
+     def breaks_and_expected_subtype(self, request):
diff --git a/patches/unbreak_clean.patch b/patches/unbreak_clean.patch
new file mode 100644 (file)
index 0000000..d2c32d8
--- /dev/null
@@ -0,0 +1,24 @@
+Description: Don't crash in setup.py's clean command
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
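+The crash is an append-vs-extend mix-up: list.append(genexpr) stores the
+generator object itself, which later fails when each entry is used as a
+path. A minimal sketch of the difference:
+
+trees = []
+trees.append(d for d in ["__pycache__", "src"] if d == "__pycache__")
+print(trees)  # [<generator object ...>], not a directory name
+trees = []
+trees.extend([d for d in ["__pycache__", "src"] if d == "__pycache__"])
+print(trees)  # ['__pycache__']
+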
+--- a/setup.py
++++ b/setup.py
+@@ -152,14 +152,14 @@ class CleanCommand(Command):
+                     ".orig",
+                 ):
+                     self._clean_me.append(filepath)
+-            self._clean_trees.append(pjoin(root, d) for d in dirs if d == "__pycache__")
++            self._clean_trees.extend([pjoin(root, d) for d in dirs if d == "__pycache__"])
+         # clean the generated pxi files
+         for pxifile in _pxifiles:
+             pxifile_replaced = pxifile.replace(".pxi.in", ".pxi")
+             self._clean_me.append(pxifile_replaced)
+-        self._clean_trees.append(d for d in ("build", "dist") if os.path.exists(d))
++        self._clean_trees.extend([d for d in ("build", "dist") if os.path.exists(d)])
+     def finalize_options(self) -> None:
+         pass
diff --git a/patches/use_system_intersphinx.patch b/patches/use_system_intersphinx.patch
new file mode 100644 (file)
index 0000000..ace2de2
--- /dev/null
@@ -0,0 +1,25 @@
+Description: Use packaged intersphinx indexes
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/876417
+Forwarded: not-needed
+
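+Per the Sphinx intersphinx docs, each mapping value is (target_uri,
+inventory); the inventory may be None (fetch objects.inv from target_uri),
+a local path, or a tuple of locations tried in order. Sketch:
+
+intersphinx_mapping = {
+    "numpy": ("https://numpy.org/doc/stable/",
+              "/usr/share/doc/python-numpy-doc/html/objects.inv"),
+}
+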
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -458,11 +458,11 @@ latex_documents = [
+ if include_api:
+     intersphinx_mapping = {
+         "dateutil": ("https://dateutil.readthedocs.io/en/latest/", None),
+-        "matplotlib": ("https://matplotlib.org/stable/", None),
+-        "numpy": ("https://numpy.org/doc/stable/", None),
+-        "py": ("https://pylib.readthedocs.io/en/latest/", None),
+-        "python": ("https://docs.python.org/3/", None),
+-        "scipy": ("https://docs.scipy.org/doc/scipy/", None),
++        "matplotlib": ("https://matplotlib.org/stable/", "/usr/share/doc/python-matplotlib-doc/html/objects.inv"),
++        "numpy": ("https://numpy.org/doc/stable/", "/usr/share/doc/python-numpy-doc/html/objects.inv"),
++        "py": ("https://pylib.readthedocs.io/en/latest/", None), # no -doc in Debian
++        "python": ("https://docs.python.org/3/", "/usr/share/doc/python3-doc/html/objects.inv"),
++        "scipy": ("https://docs.scipy.org/doc/scipy/", ("/usr/share/doc/python-scipy-doc/html/objects.inv","/usr/share/doc/python-scipy/html/objects.inv")),
+         "pyarrow": ("https://arrow.apache.org/docs/", None),
+     }
diff --git a/patches/value_counts_nat_numpy2.patch b/patches/value_counts_nat_numpy2.patch
new file mode 100644 (file)
index 0000000..c0e43e9
--- /dev/null
@@ -0,0 +1,19 @@
+Description: Avoid a test crash in value_counts
+
+First seen during the numpy 2 transition; unknown whether it is actually related to that.
+
+Author: Matthew Roeschke
+Origin: upstream pull 60416
+Forwarded: not-needed
+
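+For context, the fix builds the expected Series from an explicit list and
+index instead of a dict keyed by an np.timedelta64 scalar (sketch):
+
+import numpy as np
+from pandas import Series
+# old form, seen to crash in this test: Series({np.timedelta64(10000): 1}, name="count")
+exp_td = Series([1], index=[np.timedelta64(10000)], name="count")
+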
+--- pandas-2.2.3+dfsg.orig/pandas/tests/test_algos.py
++++ pandas-2.2.3+dfsg/pandas/tests/test_algos.py
+@@ -1281,7 +1281,7 @@ class TestValueCounts:
+             result_dt = algos.value_counts(dt)
+         tm.assert_series_equal(result_dt, exp_dt)
+-        exp_td = Series({np.timedelta64(10000): 1}, name="count")
++        exp_td = Series([1], index=[np.timedelta64(10000)], name="count")
+         with tm.assert_produces_warning(FutureWarning, match=msg):
+             result_td = algos.value_counts(td)
+         tm.assert_series_equal(result_td, exp_td)
diff --git a/patches/versioned_importorskip.patch b/patches/versioned_importorskip.patch
new file mode 100644 (file)
index 0000000..c680d30
--- /dev/null
@@ -0,0 +1,5935 @@
+Description: Skip tests instead of failing when a dependency is too old
+
+(some of these packages are pulled in by other dependencies, so
+merely not listing them in d/control does not stop them from being installed)
+
+Most of the content of this patch was generated by the following Python code:
+
+import pathlib
+import re
+
+basedir = pathlib.Path.cwd()
+if not (basedir / 'pandas/tests').exists():
+    raise FileNotFoundError('must be run from the pandas root')
+for source_file in basedir.glob('pandas/**/*.py'):
+    with open(source_file, 'r') as fd:
+        source_text = fd.read()
+    if 'pytest.importorskip' in source_text:
+        source_text = re.sub(r'pytest\.importorskip(.*)minversion', r'td.versioned_importorskip\1min_version', source_text)
+        source_text = re.sub(r'pytest\.importorskip', r'td.versioned_importorskip', source_text)
+        if '_test_decorators as td' not in source_text:
+            # add the import if it isn't already present
+            source_text, count = re.subn(r'^(import pandas|from pandas.*import)',r'import pandas.util._test_decorators as td\n\1', source_text, count=1, flags=re.MULTILINE)
+            if count != 1:
+                raise KeyError("failed to add import")
+        with open(source_file, 'w') as fd:
+            fd.write(source_text)
+
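+For example, the rewrite turns
+    pytest.importorskip("IPython", minversion="6.0.0")
+into
+    td.versioned_importorskip("IPython", min_version="6.0.0")
+and inserts the pandas.util._test_decorators import where it is missing.
+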
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/_testing/_io.py
++++ b/pandas/_testing/_io.py
+@@ -107,9 +107,9 @@ def round_trip_localpath(writer, reader,
+     pandas object
+         The original object that was serialized and then re-read.
+     """
+-    import pytest
++    import pandas.util._test_decorators as td
+-    LocalPath = pytest.importorskip("py.path").local
++    LocalPath = td.versioned_importorskip("py.path").local
+     if path is None:
+         path = "___localpath___"
+     with ensure_clean(path) as path:
+--- a/pandas/conftest.py
++++ b/pandas/conftest.py
+@@ -1816,7 +1816,7 @@ def ip():
+     Will raise a skip if IPython is not installed.
+     """
+-    pytest.importorskip("IPython", minversion="6.0.0")
++    td.versioned_importorskip("IPython", min_version="6.0.0")
+     from IPython.core.interactiveshell import InteractiveShell
+     # GH#35711 make sure sqlite history file handle is not leaked
+@@ -1833,7 +1833,7 @@ def spmatrix(request):
+     """
+     Yields scipy sparse matrix classes.
+     """
+-    sparse = pytest.importorskip("scipy.sparse")
++    sparse = td.versioned_importorskip("scipy.sparse")
+     return getattr(sparse, request.param + "_matrix")
+--- a/pandas/tests/apply/test_frame_apply.py
++++ b/pandas/tests/apply/test_frame_apply.py
+@@ -4,6 +4,7 @@ import warnings
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.dtypes import CategoricalDtype
+ import pandas as pd
+@@ -35,7 +36,7 @@ def int_frame_const_col():
+ @pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
+ def engine(request):
+     if request.param == "numba":
+-        pytest.importorskip("numba")
++        td.versioned_importorskip("numba")
+     return request.param
+--- a/pandas/tests/apply/test_numba.py
++++ b/pandas/tests/apply/test_numba.py
+@@ -26,7 +26,7 @@ def test_numba_vs_python_noop(float_fram
+ def test_numba_vs_python_string_index():
+     # GH#56189
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         1,
+         index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
+--- a/pandas/tests/arrays/categorical/test_warnings.py
++++ b/pandas/tests/arrays/categorical/test_warnings.py
+@@ -1,12 +1,13 @@
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas._testing as tm
+ class TestCategoricalWarnings:
+     def test_tab_complete_warning(self, ip):
+         # https://github.com/pandas-dev/pandas/issues/16409
+-        pytest.importorskip("IPython", minversion="6.0.0")
++        td.versioned_importorskip("IPython", min_version="6.0.0")
+         from IPython.core.completer import provisionalcompleter
+         code = "import pandas as pd; c = pd.Categorical([])"
+--- a/pandas/tests/arrays/datetimes/test_constructors.py
++++ b/pandas/tests/arrays/datetimes/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs import iNaT
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
+@@ -226,7 +227,7 @@ COARSE_TO_FINE_SAFE = [123, None, -123]
+ def test_from_arrow_with_different_units_and_timezones_with(
+     pa_unit, pd_unit, pa_tz, pd_tz, data
+ ):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     pa_type = pa.timestamp(pa_unit, tz=pa_tz)
+     arr = pa.array(data, type=pa_type)
+@@ -253,7 +254,7 @@ def test_from_arrow_with_different_units
+     ],
+ )
+ def test_from_arrow_from_empty(unit, tz):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     data = []
+     arr = pa.array(data)
+@@ -269,7 +270,7 @@ def test_from_arrow_from_empty(unit, tz)
+ def test_from_arrow_from_integers():
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789]
+     arr = pa.array(data)
+--- a/pandas/tests/arrays/interval/test_interval_pyarrow.py
++++ b/pandas/tests/arrays/interval/test_interval_pyarrow.py
+@@ -1,13 +1,14 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.core.arrays import IntervalArray
+ def test_arrow_extension_type():
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+@@ -23,7 +24,7 @@ def test_arrow_extension_type():
+ def test_arrow_array():
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+@@ -52,7 +53,7 @@ def test_arrow_array():
+ def test_arrow_array_missing():
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+@@ -89,7 +90,7 @@ def test_arrow_array_missing():
+     ids=["float", "datetime64[ns]"],
+ )
+ def test_arrow_table_roundtrip(breaks):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+@@ -125,7 +126,7 @@ def test_arrow_table_roundtrip(breaks):
+     ids=["float", "datetime64[ns]"],
+ )
+ def test_arrow_table_roundtrip_without_metadata(breaks):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arr = IntervalArray.from_breaks(breaks)
+     arr[1] = None
+@@ -145,7 +146,7 @@ def test_from_arrow_from_raw_struct_arra
+     # in case pyarrow lost the Interval extension type (eg on parquet roundtrip
+     # with datetime64[ns] subtype, see GH-45881), still allow conversion
+     # from arrow to IntervalArray
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}])
+     dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither")
+--- a/pandas/tests/arrays/masked/test_arrow_compat.py
++++ b/pandas/tests/arrays/masked/test_arrow_compat.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+@@ -8,7 +9,7 @@ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask
+--- a/pandas/tests/arrays/period/test_arrow_compat.py
++++ b/pandas/tests/arrays/period/test_arrow_compat.py
+@@ -1,5 +1,6 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import pa_version_under10p1
+ from pandas.core.dtypes.dtypes import PeriodDtype
+@@ -16,7 +17,7 @@ pytestmark = pytest.mark.filterwarnings(
+ )
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ def test_arrow_extension_type():
+--- a/pandas/tests/arrays/sparse/test_accessor.py
++++ b/pandas/tests/arrays/sparse/test_accessor.py
+@@ -3,6 +3,7 @@ import string
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import SparseDtype
+ import pandas._testing as tm
+@@ -26,7 +27,7 @@ class TestSeriesAccessor:
+         assert result == expected
+     def test_from_coo(self):
+-        scipy_sparse = pytest.importorskip("scipy.sparse")
++        scipy_sparse = td.versioned_importorskip("scipy.sparse")
+         row = [0, 3, 1, 0]
+         col = [0, 3, 1, 2]
+@@ -64,7 +65,7 @@ class TestSeriesAccessor:
+     def test_to_coo(
+         self, sort_labels, expected_rows, expected_cols, expected_values_pos
+     ):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
+         index = pd.MultiIndex.from_tuples(
+@@ -107,7 +108,7 @@ class TestFrameAccessor:
+     @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
+     @pytest.mark.parametrize("dtype", ["float64", "int64"])
+     def test_from_spmatrix(self, format, labels, dtype):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item())
+@@ -120,7 +121,7 @@ class TestFrameAccessor:
+     @pytest.mark.parametrize("format", ["csc", "csr", "coo"])
+     def test_from_spmatrix_including_explicit_zero(self, format):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         mat = sp_sparse.random(10, 2, density=0.5, format=format)
+         mat.data[0] = 0
+@@ -134,7 +135,7 @@ class TestFrameAccessor:
+         [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
+     )
+     def test_from_spmatrix_columns(self, columns):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         dtype = SparseDtype("float64", 0.0)
+@@ -147,7 +148,7 @@ class TestFrameAccessor:
+         "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)]
+     )
+     def test_to_coo(self, colnames):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         df = pd.DataFrame(
+             {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]"
+@@ -158,7 +159,7 @@ class TestFrameAccessor:
+     @pytest.mark.parametrize("fill_value", [1, np.nan])
+     def test_to_coo_nonzero_fill_val_raises(self, fill_value):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = pd.DataFrame(
+             {
+                 "A": SparseArray(
+@@ -174,7 +175,7 @@ class TestFrameAccessor:
+     def test_to_coo_midx_categorical(self):
+         # GH#50996
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         midx = pd.MultiIndex.from_arrays(
+             [
+@@ -219,7 +220,7 @@ class TestFrameAccessor:
+     @pytest.mark.parametrize("dtype", ["int64", "float64"])
+     @pytest.mark.parametrize("dense_index", [True, False])
+     def test_series_from_coo(self, dtype, dense_index):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         A = sp_sparse.eye(3, format="coo", dtype=dtype)
+         result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
+@@ -239,7 +240,7 @@ class TestFrameAccessor:
+     def test_series_from_coo_incorrect_format_raises(self):
+         # gh-26554
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
+         with pytest.raises(
+--- a/pandas/tests/arrays/sparse/test_constructors.py
++++ b/pandas/tests/arrays/sparse/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs.sparse import IntIndex
+ import pandas as pd
+@@ -188,7 +189,7 @@ class TestConstructors:
+     @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
+     @pytest.mark.parametrize("size", [0, 10])
+     def test_from_spmatrix(self, size, format):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         mat = sp_sparse.random(size, 1, density=0.5, format=format)
+         result = SparseArray.from_spmatrix(mat)
+@@ -199,7 +200,7 @@ class TestConstructors:
+     @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
+     def test_from_spmatrix_including_explicit_zero(self, format):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         mat = sp_sparse.random(10, 1, density=0.5, format=format)
+         mat.data[0] = 0
+@@ -210,7 +211,7 @@ class TestConstructors:
+         tm.assert_numpy_array_equal(result, expected)
+     def test_from_spmatrix_raises(self):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         mat = sp_sparse.eye(5, 4, format="csc")
+--- a/pandas/tests/arrays/string_/test_string.py
++++ b/pandas/tests/arrays/string_/test_string.py
+@@ -7,6 +7,7 @@ import operator
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import pa_version_under12p0
+ from pandas.core.dtypes.common import is_dtype_equal
+@@ -486,7 +487,7 @@ def test_fillna_args(dtype, arrow_string
+ def test_arrow_array(dtype):
+     # protocol added in 0.15.0
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     import pyarrow.compute as pc
+     data = pd.array(["a", "b", "c"], dtype=dtype)
+@@ -502,7 +503,7 @@ def test_arrow_array(dtype):
+ @pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
+ def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string):
+     # roundtrip possible from arrow 1.0.0
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     if using_infer_string and string_storage2 != "pyarrow_numpy":
+         request.applymarker(
+@@ -532,7 +533,7 @@ def test_arrow_load_from_zero_chunks(
+     dtype, string_storage2, request, using_infer_string
+ ):
+     # GH-41040
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     if using_infer_string and string_storage2 != "pyarrow_numpy":
+         request.applymarker(
+--- a/pandas/tests/arrays/string_/test_string_arrow.py
++++ b/pandas/tests/arrays/string_/test_string_arrow.py
+@@ -19,7 +19,7 @@ from pandas.core.arrays.string_arrow imp
+ def test_eq_all_na():
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     a = pd.array([pd.NA, pd.NA], dtype=StringDtype("pyarrow"))
+     result = a == a
+     expected = pd.array([pd.NA, pd.NA], dtype="boolean[pyarrow]")
+@@ -48,7 +48,7 @@ def test_config_bad_storage_raises():
+ @pytest.mark.parametrize("chunked", [True, False])
+ @pytest.mark.parametrize("array", ["numpy", "pyarrow"])
+ def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     array = pa if array in arrow_string_storage else np
+@@ -69,7 +69,7 @@ def test_constructor_not_string_type_rai
+ @pytest.mark.parametrize("chunked", [True, False])
+ def test_constructor_not_string_type_value_dictionary_raises(chunked):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arr = pa.array([1, 2, 3], pa.dictionary(pa.int32(), pa.int32()))
+     if chunked:
+@@ -87,7 +87,7 @@ def test_constructor_not_string_type_val
+ )
+ @pytest.mark.parametrize("chunked", [True, False])
+ def test_constructor_valid_string_type_value_dictionary(chunked):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arr = pa.array(["1", "2", "3"], pa.large_string()).dictionary_encode()
+     if chunked:
+@@ -99,14 +99,14 @@ def test_constructor_valid_string_type_v
+ def test_constructor_from_list():
+     # GH#27673
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     result = pd.Series(["E"], dtype=StringDtype(storage="pyarrow"))
+     assert isinstance(result.dtype, StringDtype)
+     assert result.dtype.storage == "pyarrow"
+ def test_from_sequence_wrong_dtype_raises(using_infer_string):
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     with pd.option_context("string_storage", "python"):
+         ArrowStringArray._from_sequence(["a", None, "c"], dtype="string")
+@@ -199,7 +199,7 @@ def test_pyarrow_not_installed_raises():
+     ],
+ )
+ def test_setitem(multiple_chunks, key, value, expected):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     result = pa.array(list("abcde"))
+     expected = pa.array(expected)
+@@ -216,7 +216,7 @@ def test_setitem(multiple_chunks, key, v
+ def test_setitem_invalid_indexer_raises():
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arr = ArrowStringArray(pa.array(list("abcde")))
+@@ -242,7 +242,7 @@ def test_setitem_invalid_indexer_raises(
+ @pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"])
+ def test_pickle_roundtrip(dtype):
+     # GH 42600
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     expected = pd.Series(range(10), dtype=dtype)
+     expected_sliced = expected.head(2)
+     full_pickled = pickle.dumps(expected)
+@@ -259,7 +259,7 @@ def test_pickle_roundtrip(dtype):
+ def test_string_dtype_error_message():
+     # GH#55051
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'."
+     with pytest.raises(ValueError, match=msg):
+         StringDtype("bla")
+--- a/pandas/tests/computation/test_compat.py
++++ b/pandas/tests/computation/test_compat.py
+@@ -1,5 +1,6 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import VERSIONS
+ import pandas as pd
+@@ -13,7 +14,7 @@ def test_compat():
+     from pandas.core.computation.check import NUMEXPR_INSTALLED
+-    ne = pytest.importorskip("numexpr")
++    ne = td.versioned_importorskip("numexpr")
+     ver = ne.__version__
+     if Version(ver) < Version(VERSIONS["numexpr"]):
+@@ -26,7 +27,7 @@ def test_compat():
+ @pytest.mark.parametrize("parser", expr.PARSERS)
+ def test_invalid_numexpr_version(engine, parser):
+     if engine == "numexpr":
+-        pytest.importorskip("numexpr")
++        td.versioned_importorskip("numexpr")
+     a, b = 1, 2  # noqa: F841
+     res = pd.eval("a + b", engine=engine, parser=parser)
+     assert res == 3
+--- a/pandas/tests/copy_view/test_astype.py
++++ b/pandas/tests/copy_view/test_astype.py
+@@ -45,7 +45,7 @@ def test_astype_single_dtype(using_copy_
+ @pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])
+ def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):
+     if new_dtype == "int64[pyarrow]":
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
+     df_orig = df.copy()
+     df2 = df.astype(new_dtype)
+@@ -70,7 +70,7 @@ def test_astype_avoids_copy(using_copy_o
+ @pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])
+ def test_astype_different_target_dtype(using_copy_on_write, dtype):
+     if dtype == "int32[pyarrow]":
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [1, 2, 3]})
+     df_orig = df.copy()
+     df2 = df.astype(dtype)
+@@ -198,7 +198,7 @@ def test_astype_different_timezones_diff
+ def test_astype_arrow_timestamp(using_copy_on_write):
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {
+             "a": [
+--- a/pandas/tests/dtypes/test_common.py
++++ b/pandas/tests/dtypes/test_common.py
+@@ -214,7 +214,7 @@ def test_is_sparse(check_scipy):
+ def test_is_scipy_sparse():
+-    sp_sparse = pytest.importorskip("scipy.sparse")
++    sp_sparse = td.versioned_importorskip("scipy.sparse")
+     assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3]))
+--- a/pandas/tests/dtypes/test_inference.py
++++ b/pandas/tests/dtypes/test_inference.py
+@@ -28,6 +28,7 @@ import numpy as np
+ import pytest
+ import pytz
++import pandas.util._test_decorators as td
+ from pandas._libs import (
+     lib,
+     missing as libmissing,
+@@ -1984,7 +1985,7 @@ def test_nan_to_nat_conversions():
+ @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
+ def test_is_scipy_sparse(spmatrix):
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     assert is_scipy_sparse(spmatrix([[0, 1]]))
+     assert not is_scipy_sparse(np.array([1]))
+--- a/pandas/tests/extension/test_arrow.py
++++ b/pandas/tests/extension/test_arrow.py
+@@ -62,7 +62,7 @@ from pandas.api.types import (
+ )
+ from pandas.tests.extension import base
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
+--- a/pandas/tests/extension/test_string.py
++++ b/pandas/tests/extension/test_string.py
+@@ -21,6 +21,7 @@ from typing import cast
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.api.types import is_string_dtype
+@@ -35,7 +36,7 @@ def maybe_split_array(arr, chunked):
+     elif arr.dtype.storage != "pyarrow":
+         return arr
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     arrow_array = arr._pa_array
+     split = len(arrow_array) // 2
+--- a/pandas/tests/frame/indexing/test_indexing.py
++++ b/pandas/tests/frame/indexing/test_indexing.py
+@@ -1945,7 +1945,7 @@ def test_adding_new_conditional_column()
+ )
+ def test_adding_new_conditional_column_with_string(dtype, infer_string) -> None:
+     # https://github.com/pandas-dev/pandas/issues/56204
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [1, 2], "b": [3, 4]})
+     with pd.option_context("future.infer_string", infer_string):
+@@ -1958,7 +1958,7 @@ def test_adding_new_conditional_column_w
+ def test_add_new_column_infer_string():
+     # GH#55366
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"x": [1]})
+     with pd.option_context("future.infer_string", True):
+         df.loc[df["x"] == 1, "y"] = "1"
+--- a/pandas/tests/frame/indexing/test_setitem.py
++++ b/pandas/tests/frame/indexing/test_setitem.py
+@@ -760,7 +760,7 @@ class TestDataFrameSetItem:
+     def test_setitem_string_option_object_index(self):
+         # GH#55638
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = DataFrame({"a": [1, 2]})
+         with pd.option_context("future.infer_string", True):
+             df["b"] = Index(["a", "b"], dtype=object)
+--- a/pandas/tests/frame/methods/test_astype.py
++++ b/pandas/tests/frame/methods/test_astype.py
+@@ -893,7 +893,7 @@ def test_frame_astype_no_copy():
+ @pytest.mark.parametrize("dtype", ["int64", "Int64"])
+ def test_astype_copies(dtype):
+     # GH#50984
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
+     result = df.astype("int64[pyarrow]", copy=True)
+     df.iloc[0, 0] = 100
+--- a/pandas/tests/frame/methods/test_convert_dtypes.py
++++ b/pandas/tests/frame/methods/test_convert_dtypes.py
+@@ -3,6 +3,7 @@ import datetime
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+@@ -49,7 +50,7 @@ class TestConvertDtypes:
+         assert result.columns.name == "cols"
+     def test_pyarrow_dtype_backend(self):
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame(
+             {
+                 "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
+@@ -105,13 +106,13 @@ class TestConvertDtypes:
+         tm.assert_frame_equal(result, expected)
+     def test_pyarrow_dtype_backend_already_pyarrow(self):
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]")
+         result = expected.convert_dtypes(dtype_backend="pyarrow")
+         tm.assert_frame_equal(result, expected)
+     def test_pyarrow_dtype_backend_from_pandas_nullable(self):
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame(
+             {
+                 "a": pd.Series([1, 2, None], dtype="Int32"),
+@@ -135,7 +136,7 @@ class TestConvertDtypes:
+     def test_pyarrow_dtype_empty_object(self):
+         # GH 50970
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = pd.DataFrame(columns=[0])
+         result = expected.convert_dtypes(dtype_backend="pyarrow")
+         tm.assert_frame_equal(result, expected)
+@@ -152,7 +153,7 @@ class TestConvertDtypes:
+     def test_pyarrow_backend_no_conversion(self):
+         # GH#52872
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
+         expected = df.copy()
+         result = df.convert_dtypes(
+@@ -166,7 +167,7 @@ class TestConvertDtypes:
+     def test_convert_dtypes_pyarrow_to_np_nullable(self):
+         # GH 53648
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
+         result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+         expected = pd.DataFrame(range(2), dtype="Int32")
+@@ -174,7 +175,7 @@ class TestConvertDtypes:
+     def test_convert_dtypes_pyarrow_timestamp(self):
+         # GH 54191
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min"))
+         expected = ser.astype("timestamp[ms][pyarrow]")
+         result = expected.convert_dtypes(dtype_backend="pyarrow")
+--- a/pandas/tests/frame/methods/test_cov_corr.py
++++ b/pandas/tests/frame/methods/test_cov_corr.py
+@@ -105,7 +105,7 @@ class TestDataFrameCorr:
+     @pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
+     def test_corr_scipy_method(self, float_frame, method):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         float_frame.loc[float_frame.index[:5], "A"] = np.nan
+         float_frame.loc[float_frame.index[5:10], "B"] = np.nan
+         float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20].copy()
+@@ -126,7 +126,7 @@ class TestDataFrameCorr:
+     @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
+     def test_corr_nooverlap(self, meth):
+         # nothing in common
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(
+             {
+                 "A": [1, 1.5, 1, np.nan, np.nan, np.nan],
+@@ -159,7 +159,7 @@ class TestDataFrameCorr:
+         # when dtypes of pandas series are different
+         # then ndarray will have dtype=object,
+         # so it need to be properly handled
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame({"a": [True, False], "b": [1, 0]})
+         expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
+@@ -201,7 +201,7 @@ class TestDataFrameCorr:
+     @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
+     def test_corr_nullable_integer(self, nullable_column, other_column, method):
+         # https://github.com/pandas-dev/pandas/issues/33803
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         data = DataFrame({"a": nullable_column, "b": other_column})
+         result = data.corr(method=method)
+         expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
+@@ -250,7 +250,7 @@ class TestDataFrameCorr:
+     @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
+     def test_corr_min_periods_greater_than_length(self, method):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame({"A": [1, 2], "B": [1, 2]})
+         result = df.corr(method=method, min_periods=3)
+         expected = DataFrame(
+@@ -264,7 +264,7 @@ class TestDataFrameCorr:
+         # when dtypes of pandas series are different
+         # then ndarray will have dtype=object,
+         # so it need to be properly handled
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]})
+         expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
+         if numeric_only:
+@@ -433,7 +433,7 @@ class TestDataFrameCorrWith:
+     def test_corrwith_spearman(self):
+         # GH#21925
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
+         result = df.corrwith(df**2, method="spearman")
+         expected = Series(np.ones(len(result)))
+@@ -441,7 +441,7 @@ class TestDataFrameCorrWith:
+     def test_corrwith_kendall(self):
+         # GH#21925
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
+         result = df.corrwith(df**2, method="kendall")
+         expected = Series(np.ones(len(result)))
+@@ -449,7 +449,7 @@ class TestDataFrameCorrWith:
+     def test_corrwith_spearman_with_tied_data(self):
+         # GH#48826
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df1 = DataFrame(
+             {
+                 "A": [1, np.nan, 7, 8],
+--- a/pandas/tests/frame/methods/test_describe.py
++++ b/pandas/tests/frame/methods/test_describe.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     Categorical,
+@@ -398,7 +399,7 @@ class TestDataFrameDescribe:
+     def test_describe_exclude_pa_dtype(self):
+         # GH#52570
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         df = DataFrame(
+             {
+                 "a": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int8())),
+--- a/pandas/tests/frame/methods/test_dot.py
++++ b/pandas/tests/frame/methods/test_dot.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -144,7 +145,7 @@ class TestDataFrameDot(DotSharedTests):
+     [("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")],
+ )
+ def test_arrow_dtype(dtype, exp_dtype):
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     cols = ["a", "b"]
+     df_a = DataFrame([[1, 2], [3, 4], [5, 6]], columns=cols, dtype="int32")
+--- a/pandas/tests/frame/methods/test_info.py
++++ b/pandas/tests/frame/methods/test_info.py
+@@ -7,6 +7,7 @@ import textwrap
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import (
+     IS64,
+     PYPY,
+@@ -527,7 +528,7 @@ def test_memory_usage_empty_no_warning()
+ @pytest.mark.single_cpu
+ def test_info_compute_numba():
+     # GH#51922
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     df = DataFrame([[1, 2], [3, 4]])
+     with option_context("compute.use_numba", True):
+--- a/pandas/tests/frame/methods/test_interpolate.py
++++ b/pandas/tests/frame/methods/test_interpolate.py
+@@ -213,7 +213,7 @@ class TestDataFrameInterpolate:
+             df.interpolate(method="values")
+     def test_interp_various(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(
+             {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+         )
+@@ -252,7 +252,7 @@ class TestDataFrameInterpolate:
+         tm.assert_frame_equal(result, expected, check_dtype=False)
+     def test_interp_alt_scipy(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(
+             {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+         )
+@@ -541,7 +541,7 @@ class TestDataFrameInterpolate:
+     )
+     def test_interpolate_arrow(self, dtype):
+         # GH#55347
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]")
+         result = df.interpolate(limit=2)
+         expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]")
+--- a/pandas/tests/frame/methods/test_join.py
++++ b/pandas/tests/frame/methods/test_join.py
+@@ -3,6 +3,7 @@ from datetime import datetime
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import MergeError
+ import pandas as pd
+@@ -163,7 +164,7 @@ def test_join_on_single_col_dup_on_right
+     # GH 46622
+     # Dups on right allowed by one_to_many constraint
+     if dtype == "string[pyarrow]":
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     left_no_dup = left_no_dup.astype(dtype)
+     right_w_dups.index = right_w_dups.index.astype(dtype)
+     left_no_dup.join(
+--- a/pandas/tests/frame/methods/test_rank.py
++++ b/pandas/tests/frame/methods/test_rank.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs.algos import (
+     Infinity,
+     NegInfinity,
+@@ -39,7 +40,7 @@ class TestRank:
+         return request.param
+     def test_rank(self, float_frame):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         float_frame.loc[::2, "A"] = np.nan
+         float_frame.loc[::3, "B"] = np.nan
+@@ -143,7 +144,7 @@ class TestRank:
+             float_string_frame.rank(axis=1)
+     def test_rank_na_option(self, float_frame):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         float_frame.loc[::2, "A"] = np.nan
+         float_frame.loc[::3, "B"] = np.nan
+@@ -227,7 +228,7 @@ class TestRank:
+     @pytest.mark.parametrize("ax", [0, 1])
+     @pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"])
+     def test_rank_methods_frame(self, ax, m):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         xs = np.random.default_rng(2).integers(0, 21, (100, 26))
+         xs = (xs - 10.0) / 10.0
+@@ -503,7 +504,7 @@ class TestRank:
+     )
+     def test_rank_string_dtype(self, dtype, exp_dtype):
+         # GH#55362
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         obj = Series(["foo", "foo", None, "foo"], dtype=dtype)
+         result = obj.rank(method="first")
+         expected = Series([1, 2, None, 3], dtype=exp_dtype)
+--- a/pandas/tests/frame/test_api.py
++++ b/pandas/tests/frame/test_api.py
+@@ -5,6 +5,7 @@ import pydoc
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ from pandas._config.config import option_context
+@@ -288,7 +289,7 @@ class TestDataFrameMisc:
+     def test_tab_complete_warning(self, ip, frame_or_series):
+         # GH 16409
+-        pytest.importorskip("IPython", minversion="6.0.0")
++        td.versioned_importorskip("IPython", min_version="6.0.0")
+         from IPython.core.completer import provisionalcompleter
+         if frame_or_series is DataFrame:
+@@ -383,7 +384,7 @@ class TestDataFrameMisc:
+     def test_inspect_getmembers(self):
+         # GH38740
+-        pytest.importorskip("jinja2")
++        td.versioned_importorskip("jinja2")
+         df = DataFrame()
+         msg = "DataFrame._data is deprecated"
+         with tm.assert_produces_warning(
+--- a/pandas/tests/frame/test_arrow_interface.py
++++ b/pandas/tests/frame/test_arrow_interface.py
+@@ -6,7 +6,7 @@ import pandas.util._test_decorators as t
+ import pandas as pd
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ @td.skip_if_no("pyarrow", min_version="14.0")
+--- a/pandas/tests/frame/test_constructors.py
++++ b/pandas/tests/frame/test_constructors.py
+@@ -2704,7 +2704,7 @@ class TestDataFrameConstructors:
+     def test_frame_string_inference(self):
+         # GH#54430
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype = "string[pyarrow_numpy]"
+         expected = DataFrame(
+             {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
+@@ -2739,7 +2739,7 @@ class TestDataFrameConstructors:
+     def test_frame_string_inference_array_string_dtype(self):
+         # GH#54496
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype = "string[pyarrow_numpy]"
+         expected = DataFrame(
+             {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
+@@ -2764,7 +2764,7 @@ class TestDataFrameConstructors:
+     def test_frame_string_inference_block_dim(self):
+         # GH#55363
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         with pd.option_context("future.infer_string", True):
+             df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
+         assert df._mgr.blocks[0].ndim == 2
+@@ -2852,7 +2852,7 @@ class TestDataFrameConstructorIndexInfer
+     )
+     def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
+         # GH 53617
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         cols = pd.arrays.ArrowExtensionArray(
+             pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
+         )
+--- a/pandas/tests/frame/test_query_eval.py
++++ b/pandas/tests/frame/test_query_eval.py
+@@ -1384,7 +1384,7 @@ class TestDataFrameQueryBacktickQuoting:
+     @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])
+     def test_query_ea_dtypes(self, dtype):
+         if dtype == "int64[pyarrow]":
+-            pytest.importorskip("pyarrow")
++            td.versioned_importorskip("pyarrow")
+         # GH#50261
+         df = DataFrame({"a": Series([1, 2], dtype=dtype)})
+         ref = {2}  # noqa: F841
+@@ -1402,7 +1402,7 @@ class TestDataFrameQueryBacktickQuoting:
+         if engine == "numexpr" and not NUMEXPR_INSTALLED:
+             pytest.skip("numexpr not installed")
+         if dtype == "int64[pyarrow]":
+-            pytest.importorskip("pyarrow")
++            td.versioned_importorskip("pyarrow")
+         df = DataFrame(
+             {"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)}
+         )
+--- a/pandas/tests/frame/test_reductions.py
++++ b/pandas/tests/frame/test_reductions.py
+@@ -369,7 +369,7 @@ class TestDataFrameAnalytics:
+         )
+     def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         def skewness(x):
+             if len(x) < 3:
+@@ -1162,7 +1162,7 @@ class TestDataFrameAnalytics:
+     def test_idxmax_arrow_types(self):
+         # GH#55368
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")
+         result = df.idxmax()
+@@ -2020,7 +2020,7 @@ def test_reduction_axis_none_returns_sca
+     result = getattr(df, method)(axis=None, numeric_only=numeric_only)
+     np_arr = df.to_numpy(dtype=np.float64)
+     if method in {"skew", "kurt"}:
+-        comp_mod = pytest.importorskip("scipy.stats")
++        comp_mod = td.versioned_importorskip("scipy.stats")
+         if method == "kurt":
+             method = "kurtosis"
+         expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)
+--- a/pandas/tests/frame/test_repr.py
++++ b/pandas/tests/frame/test_repr.py
+@@ -7,6 +7,7 @@ from io import StringIO
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ from pandas import (
+@@ -287,7 +288,7 @@ NaT   4"""
+             assert "StringCol" in repr(df)
+     def test_latex_repr(self):
+-        pytest.importorskip("jinja2")
++        td.versioned_importorskip("jinja2")
+         expected = r"""\begin{tabular}{llll}
+ \toprule
+  & 0 & 1 & 2 \\
+@@ -475,7 +476,7 @@ NaT   4"""
+     def test_repr_ea_columns(self, any_string_dtype):
+         # GH#54797
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]})
+         df.columns = df.columns.astype(any_string_dtype)
+         expected = """   long_column_name  col2
+--- a/pandas/tests/frame/test_subclass.py
++++ b/pandas/tests/frame/test_subclass.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -669,7 +670,7 @@ class TestDataFrameSubclassing:
+         assert isinstance(result, tm.SubclassedSeries)
+     def test_corrwith(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         index = ["a", "b", "c", "d", "e"]
+         columns = ["one", "two", "three", "four"]
+         df1 = tm.SubclassedDataFrame(
+--- a/pandas/tests/frame/test_ufunc.py
++++ b/pandas/tests/frame/test_ufunc.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.api.types import is_extension_array_dtype
+@@ -250,7 +251,7 @@ def test_alignment_deprecation_many_inpu
+     # https://github.com/pandas-dev/pandas/issues/39184
+     # test that the deprecation also works with > 2 inputs -> using a numba
+     # written ufunc for this because numpy itself doesn't have such ufuncs
+-    numba = pytest.importorskip("numba")
++    numba = td.versioned_importorskip("numba")
+     @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
+     def my_ufunc(x, y, z):
+--- a/pandas/tests/generic/test_finalize.py
++++ b/pandas/tests/generic/test_finalize.py
+@@ -7,6 +7,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+@@ -443,7 +444,7 @@ def test_finalize_last(data):
+ @not_implemented_mark
+ def test_finalize_called_eval_numexpr():
+-    pytest.importorskip("numexpr")
++    td.versioned_importorskip("numexpr")
+     df = pd.DataFrame({"A": [1, 2]})
+     df.attrs["A"] = 1
+     result = df.eval("A + 1", engine="numexpr")
+--- a/pandas/tests/generic/test_to_xarray.py
++++ b/pandas/tests/generic/test_to_xarray.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     Categorical,
+     DataFrame,
+@@ -10,7 +11,7 @@ from pandas import (
+ )
+ import pandas._testing as tm
+-pytest.importorskip("xarray")
++td.versioned_importorskip("xarray")
+ class TestDataFrameToXArray:
+--- a/pandas/tests/groupby/aggregate/test_numba.py
++++ b/pandas/tests/groupby/aggregate/test_numba.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import NumbaUtilError
+ from pandas import (
+@@ -22,7 +23,7 @@ pytestmark = pytest.mark.single_cpu
+ def test_correct_function_signature():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def incorrect_function(x):
+         return sum(x) * 2.7
+@@ -39,7 +40,7 @@ def test_correct_function_signature():
+ def test_check_nopython_kwargs():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def incorrect_function(values, index):
+         return sum(values) * 2.7
+@@ -61,7 +62,7 @@ def test_check_nopython_kwargs():
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func_numba(values, index):
+         return np.mean(values) * 2.7
+@@ -92,7 +93,7 @@ def test_numba_vs_cython(jit, pandas_obj
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
+     # Test that the functions are cached correctly if we switch functions
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func_1(values, index):
+         return np.mean(values) - 3.4
+@@ -130,7 +131,7 @@ def test_cache(jit, pandas_obj, nogil, p
+ def test_use_global_config():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func_1(values, index):
+         return np.mean(values) - 3.4
+@@ -155,7 +156,7 @@ def test_use_global_config():
+     ],
+ )
+ def test_multifunc_numba_vs_cython_frame(agg_kwargs):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     data = DataFrame(
+         {
+             0: ["a", "a", "b", "b", "a"],
+@@ -190,7 +191,7 @@ def test_multifunc_numba_vs_cython_frame
+     ],
+ )
+ def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     data = DataFrame(
+         {
+             0: ["a", "a", "b", "b", "a"],
+@@ -212,7 +213,7 @@ def test_multifunc_numba_udf_frame(agg_k
+     [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
+ )
+ def test_multifunc_numba_vs_cython_series(agg_kwargs):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     labels = ["a", "a", "b", "b", "a"]
+     data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
+     grouped = data.groupby(labels)
+@@ -265,7 +266,7 @@ def test_multifunc_numba_vs_cython_serie
+     strict=False,
+ )
+ def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     labels = ["a", "a", "b", "b", "a"]
+     grouped = data.groupby(labels)
+     result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
+@@ -278,7 +279,7 @@ def test_multifunc_numba_kwarg_propagati
+ def test_args_not_cached():
+     # GH 41647
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def sum_last(values, index, n):
+         return values[-n:].sum()
+@@ -296,7 +297,7 @@ def test_args_not_cached():
+ def test_index_data_correctly_passed():
+     # GH 43133
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def f(values, index):
+         return np.mean(index)
+@@ -312,7 +313,7 @@ def test_index_data_correctly_passed():
+ def test_engine_kwargs_not_cached():
+     # If the user passes a different set of engine_kwargs don't return the same
+     # jitted function
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     nogil = True
+     parallel = False
+     nopython = True
+@@ -339,7 +340,7 @@ def test_engine_kwargs_not_cached():
+ @pytest.mark.filterwarnings("ignore")
+ def test_multiindex_one_key(nogil, parallel, nopython):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def numba_func(values, index):
+         return 1
+@@ -354,7 +355,7 @@ def test_multiindex_one_key(nogil, paral
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def numba_func(values, index):
+         return 1
+@@ -368,7 +369,7 @@ def test_multiindex_multi_key_not_suppor
+ def test_multilabel_numba_vs_cython(numba_supported_reductions):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     reduction, kwargs = numba_supported_reductions
+     df = DataFrame(
+         {
+@@ -389,7 +390,7 @@ def test_multilabel_numba_vs_cython(numb
+ def test_multilabel_udf_numba_vs_cython():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     df = DataFrame(
+         {
+             "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+--- a/pandas/tests/groupby/test_counting.py
++++ b/pandas/tests/groupby/test_counting.py
+@@ -4,6 +4,7 @@ from string import ascii_lowercase
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Index,
+@@ -385,7 +386,7 @@ def test_count_uses_size_on_exception():
+ def test_count_arrow_string_array(any_string_dtype):
+     # GH#54751
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)}
+     )
+--- a/pandas/tests/groupby/test_groupby.py
++++ b/pandas/tests/groupby/test_groupby.py
+@@ -2596,7 +2596,7 @@ def test_groupby_column_index_name_lost(
+ def test_groupby_duplicate_columns(infer_string):
+     # GH: 31735
+     if infer_string:
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
+     ).astype(object)
+--- a/pandas/tests/groupby/test_numba.py
++++ b/pandas/tests/groupby/test_numba.py
+@@ -1,5 +1,6 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+ pytestmark = pytest.mark.single_cpu
+-pytest.importorskip("numba")
++td.versioned_importorskip("numba")
+ @pytest.mark.filterwarnings("ignore")
+--- a/pandas/tests/groupby/test_reductions.py
++++ b/pandas/tests/groupby/test_reductions.py
+@@ -701,7 +701,7 @@ def test_groupby_min_max_categorical(fun
+ @pytest.mark.parametrize("func", ["min", "max"])
+ def test_min_empty_string_dtype(func):
+     # GH#55619
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     dtype = "string[pyarrow_numpy]"
+     df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
+     result = getattr(df.groupby("a"), func)()
+--- a/pandas/tests/groupby/test_timegrouper.py
++++ b/pandas/tests/groupby/test_timegrouper.py
+@@ -10,6 +10,7 @@ import numpy as np
+ import pytest
+ import pytz
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -944,7 +945,7 @@ class TestGroupBy:
+     def test_groupby_agg_numba_timegrouper_with_nat(
+         self, groupby_with_truncated_bingrouper
+     ):
+-        pytest.importorskip("numba")
++        td.versioned_importorskip("numba")
+         # See discussion in GH#43487
+         gb = groupby_with_truncated_bingrouper
+--- a/pandas/tests/groupby/transform/test_numba.py
++++ b/pandas/tests/groupby/transform/test_numba.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import NumbaUtilError
+ from pandas import (
+@@ -14,7 +15,7 @@ pytestmark = pytest.mark.single_cpu
+ def test_correct_function_signature():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def incorrect_function(x):
+         return x + 1
+@@ -31,7 +32,7 @@ def test_correct_function_signature():
+ def test_check_nopython_kwargs():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def incorrect_function(values, index):
+         return values + 1
+@@ -53,7 +54,7 @@ def test_check_nopython_kwargs():
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func(values, index):
+         return values + 1
+@@ -84,7 +85,7 @@ def test_numba_vs_cython(jit, pandas_obj
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
+     # Test that the functions are cached correctly if we switch functions
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func_1(values, index):
+         return values + 1
+@@ -121,7 +122,7 @@ def test_cache(jit, pandas_obj, nogil, p
+ def test_use_global_config():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def func_1(values, index):
+         return values + 1
+@@ -141,7 +142,7 @@ def test_use_global_config():
+     "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}]
+ )
+ def test_string_cython_vs_numba(agg_func, numba_supported_reductions):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     agg_func, kwargs = numba_supported_reductions
+     data = DataFrame(
+         {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
+@@ -159,7 +160,7 @@ def test_string_cython_vs_numba(agg_func
+ def test_args_not_cached():
+     # GH 41647
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def sum_last(values, index, n):
+         return values[-n:].sum()
+@@ -177,7 +178,7 @@ def test_args_not_cached():
+ def test_index_data_correctly_passed():
+     # GH 43133
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def f(values, index):
+         return index - 1
+@@ -191,7 +192,7 @@ def test_index_data_correctly_passed():
+ def test_engine_kwargs_not_cached():
+     # If the user passes a different set of engine_kwargs don't return the same
+     # jitted function
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     nogil = True
+     parallel = False
+     nopython = True
+@@ -218,7 +219,7 @@ def test_engine_kwargs_not_cached():
+ @pytest.mark.filterwarnings("ignore")
+ def test_multiindex_one_key(nogil, parallel, nopython):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def numba_func(values, index):
+         return 1
+@@ -233,7 +234,7 @@ def test_multiindex_one_key(nogil, paral
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     def numba_func(values, index):
+         return 1
+@@ -247,7 +248,7 @@ def test_multiindex_multi_key_not_suppor
+ def test_multilabel_numba_vs_cython(numba_supported_reductions):
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     reduction, kwargs = numba_supported_reductions
+     df = DataFrame(
+         {
+@@ -264,7 +265,7 @@ def test_multilabel_numba_vs_cython(numb
+ def test_multilabel_udf_numba_vs_cython():
+-    pytest.importorskip("numba")
++    td.versioned_importorskip("numba")
+     df = DataFrame(
+         {
+             "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+--- a/pandas/tests/indexes/base_class/test_constructors.py
++++ b/pandas/tests/indexes/base_class/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     Index,
+@@ -47,7 +48,7 @@ class TestIndexConstructor:
+     def test_index_string_inference(self):
+         # GH#54430
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype = "string[pyarrow_numpy]"
+         expected = Index(["a", "b"], dtype=dtype)
+         with pd.option_context("future.infer_string", True):
+--- a/pandas/tests/indexes/base_class/test_reshape.py
++++ b/pandas/tests/indexes/base_class/test_reshape.py
+@@ -4,6 +4,7 @@ Tests for ndarray-like method on the bas
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import Index
+ import pandas._testing as tm
+@@ -58,7 +59,7 @@ class TestReshape:
+     def test_insert_none_into_string_numpy(self):
+         # GH#55365
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]")
+         result = index.insert(-1, None)
+         expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]")
+--- a/pandas/tests/indexes/multi/test_constructors.py
++++ b/pandas/tests/indexes/multi/test_constructors.py
+@@ -7,6 +7,7 @@ import itertools
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
+ import pandas as pd
+@@ -648,7 +649,7 @@ def test_from_frame():
+ def test_from_frame_missing_values_multiIndex():
+     # GH 39984
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     df = pd.DataFrame(
+         {
+--- a/pandas/tests/indexes/numeric/test_indexing.py
++++ b/pandas/tests/indexes/numeric/test_indexing.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import InvalidIndexError
+ from pandas import (
+@@ -385,7 +386,7 @@ class TestGetIndexer:
+     def test_get_indexer_masked_na_boolean(self, dtype):
+         # GH#39133
+         if dtype == "bool[pyarrow]":
+-            pytest.importorskip("pyarrow")
++            td.versioned_importorskip("pyarrow")
+         idx = Index([True, False, NA], dtype=dtype)
+         result = idx.get_loc(False)
+         assert result == 1
+@@ -393,7 +394,7 @@ class TestGetIndexer:
+         assert result == 2
+     def test_get_indexer_arrow_dictionary_target(self):
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         target = Index(
+             ArrowExtensionArray(
+                 pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8()))
+--- a/pandas/tests/indexes/test_base.py
++++ b/pandas/tests/indexes/test_base.py
+@@ -1285,7 +1285,7 @@ class TestIndex:
+     def test_tab_complete_warning(self, ip):
+         # https://github.com/pandas-dev/pandas/issues/16409
+-        pytest.importorskip("IPython", minversion="6.0.0")
++        td.versioned_importorskip("IPython", min_version="6.0.0")
+         from IPython.core.completer import provisionalcompleter
+         code = "import pandas as pd; idx = pd.Index([1, 2])"
+--- a/pandas/tests/indexing/test_datetime.py
++++ b/pandas/tests/indexing/test_datetime.py
+@@ -2,6 +2,7 @@ import re
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -174,7 +175,7 @@ class TestDatetimeIndex:
+     def test_getitem_pyarrow_index(self, frame_or_series):
+         # GH 53644
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         obj = frame_or_series(
+             range(5),
+             index=date_range("2020", freq="D", periods=5).astype(
+--- a/pandas/tests/indexing/test_loc.py
++++ b/pandas/tests/indexing/test_loc.py
+@@ -1308,7 +1308,7 @@ class TestLocBaseIndependent:
+     @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
+     @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
+     def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         spmatrix_t = getattr(sp_sparse, spmatrix_t)
+@@ -1337,7 +1337,7 @@ class TestLocBaseIndependent:
+     def test_loc_getitem_sparse_frame(self):
+         # GH34687
+-        sp_sparse = pytest.importorskip("scipy.sparse")
++        sp_sparse = td.versioned_importorskip("scipy.sparse")
+         df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5))
+         result = df.loc[range(2)]
+@@ -3078,7 +3078,7 @@ def test_loc_periodindex_3_levels():
+ def test_loc_setitem_pyarrow_strings():
+     # GH#52319
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {
+             "strings": Series(["A", "B", "C"], dtype="string[pyarrow]"),
+--- a/pandas/tests/interchange/test_impl.py
++++ b/pandas/tests/interchange/test_impl.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs.tslibs import iNaT
+ from pandas.compat import (
+     is_ci_environment,
+@@ -67,7 +68,7 @@ def test_categorical_dtype(data, data_ca
+ def test_categorical_pyarrow():
+     # GH 49889
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]
+     table = pa.table({"weekday": pa.array(arr).dictionary_encode()})
+@@ -82,7 +83,7 @@ def test_categorical_pyarrow():
+ def test_empty_categorical_pyarrow():
+     # https://github.com/pandas-dev/pandas/issues/53077
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     arr = [None]
+     table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()})
+@@ -94,7 +95,7 @@ def test_empty_categorical_pyarrow():
+ def test_large_string_pyarrow():
+     # GH 52795
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     arr = ["Mon", "Tue"]
+     table = pa.table({"weekday": pa.array(arr, "large_string")})
+@@ -120,7 +121,7 @@ def test_large_string_pyarrow():
+ )
+ def test_bitmasks_pyarrow(offset, length, expected_values):
+     # GH 52795
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     arr = [3.3, None, 2.1]
+     table = pa.table({"arr": arr}).slice(offset, length)
+@@ -282,7 +283,7 @@ def test_categorical_to_numpy_dlpack():
+ @pytest.mark.parametrize("data", [{}, {"a": []}])
+ def test_empty_pyarrow(data):
+     # GH 53155
+-    pytest.importorskip("pyarrow", "11.0.0")
++    td.versioned_importorskip("pyarrow", "11.0.0")
+     from pyarrow.interchange import from_dataframe as pa_from_dataframe
+     expected = pd.DataFrame(data)
+@@ -292,7 +293,7 @@ def test_empty_pyarrow(data):
+ def test_multi_chunk_pyarrow() -> None:
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]])
+     names = ["n_legs"]
+     table = pa.table([n_legs], names=names)
+@@ -305,7 +306,7 @@ def test_multi_chunk_pyarrow() -> None:
+ def test_multi_chunk_column() -> None:
+-    pytest.importorskip("pyarrow", "11.0.0")
++    td.versioned_importorskip("pyarrow", "11.0.0")
+     ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]")
+     df = pd.concat([ser, ser], ignore_index=True).to_frame("a")
+     df_orig = df.copy()
+@@ -327,7 +328,7 @@ def test_multi_chunk_column() -> None:
+ def test_timestamp_ns_pyarrow():
+     # GH 56712
+-    pytest.importorskip("pyarrow", "11.0.0")
++    td.versioned_importorskip("pyarrow", "11.0.0")
+     timestamp_args = {
+         "year": 2000,
+         "month": 1,
+@@ -362,7 +363,7 @@ def test_datetimetzdtype(tz, unit):
+ def test_interchange_from_non_pandas_tz_aware(request):
+     # GH 54239, 54287
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     import pyarrow.compute as pc
+     if is_platform_windows() and is_ci_environment():
+@@ -420,7 +421,7 @@ def test_empty_string_column():
+ def test_large_string():
+     # GH#56702
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+     result = pd.api.interchange.from_dataframe(df.__dataframe__())
+     expected = pd.DataFrame({"a": ["x"]}, dtype="object")
+@@ -500,7 +501,7 @@ def test_pandas_nullable_with_missing_va
+ ) -> None:
+     # https://github.com/pandas-dev/pandas/issues/57643
+     # https://github.com/pandas-dev/pandas/issues/57664
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     import pyarrow.interchange as pai
+     if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+@@ -562,7 +563,7 @@ def test_pandas_nullable_without_missing
+     data: list, dtype: str, expected_dtype: str
+ ) -> None:
+     # https://github.com/pandas-dev/pandas/issues/57643
+-    pa = pytest.importorskip("pyarrow", "11.0.0")
++    pa = td.versioned_importorskip("pyarrow", "11.0.0")
+     import pyarrow.interchange as pai
+     if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+@@ -578,7 +579,7 @@ def test_pandas_nullable_without_missing
+ def test_string_validity_buffer() -> None:
+     # https://github.com/pandas-dev/pandas/issues/57761
+-    pytest.importorskip("pyarrow", "11.0.0")
++    td.versioned_importorskip("pyarrow", "11.0.0")
+     df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+     result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+     assert result is None
+@@ -586,7 +587,7 @@ def test_string_validity_buffer() -> Non
+ def test_string_validity_buffer_no_missing() -> None:
+     # https://github.com/pandas-dev/pandas/issues/57762
+-    pytest.importorskip("pyarrow", "11.0.0")
++    td.versioned_importorskip("pyarrow", "11.0.0")
+     df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]")
+     validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+     assert validity is not None
+--- a/pandas/tests/interchange/test_utils.py
++++ b/pandas/tests/interchange/test_utils.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
+@@ -78,7 +79,7 @@ def test_dtype_to_arrow_c_fmt(pandas_dty
+ )
+ def test_dtype_to_arrow_c_fmt_arrowdtype(pa_dtype, args_kwargs, c_string):
+     # GH 52323
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     if not args_kwargs:
+         pa_type = getattr(pa, pa_dtype)()
+     elif isinstance(args_kwargs, tuple):
+--- a/pandas/tests/io/conftest.py
++++ b/pandas/tests/io/conftest.py
+@@ -58,8 +58,8 @@ def s3_base(worker_id, monkeypatch):
+     Sets up moto server in separate process locally
+     Return url for motoserver/moto CI service
+     """
+-    pytest.importorskip("s3fs")
+-    pytest.importorskip("boto3")
++    td.versioned_importorskip("s3fs")
++    td.versioned_importorskip("boto3")
+     # temporary workaround as moto fails for botocore >= 1.11 otherwise,
+     # see https://github.com/spulec/moto/issues/1924 & 1952
+@@ -80,9 +80,9 @@ def s3_base(worker_id, monkeypatch):
+             # set in .github/workflows/unit-tests.yml
+             yield "http://localhost:5000"
+     else:
+-        requests = pytest.importorskip("requests")
+-        pytest.importorskip("moto")
+-        pytest.importorskip("flask")  # server mode needs flask too
++        requests = td.versioned_importorskip("requests")
++        td.versioned_importorskip("moto")
++        td.versioned_importorskip("flask")  # server mode needs flask too
+         # Launching moto in server mode, i.e., as a separate process
+         # with an S3 endpoint on localhost
+--- a/pandas/tests/io/excel/test_odf.py
++++ b/pandas/tests/io/excel/test_odf.py
+@@ -3,12 +3,13 @@ import functools
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+ import pandas as pd
+ import pandas._testing as tm
+-pytest.importorskip("odf")
++td.versioned_importorskip("odf")
+ if is_platform_windows():
+     pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_odswriter.py
++++ b/pandas/tests/io/excel/test_odswriter.py
+@@ -6,6 +6,7 @@ import re
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+ import pandas as pd
+@@ -13,7 +14,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelWriter
+-odf = pytest.importorskip("odf")
++odf = td.versioned_importorskip("odf")
+ if is_platform_windows():
+     pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_openpyxl.py
++++ b/pandas/tests/io/excel/test_openpyxl.py
+@@ -5,6 +5,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+ import pandas as pd
+@@ -17,7 +18,7 @@ from pandas.io.excel import (
+ )
+ from pandas.io.excel._openpyxl import OpenpyxlReader
+-openpyxl = pytest.importorskip("openpyxl")
++openpyxl = td.versioned_importorskip("openpyxl")
+ if is_platform_windows():
+     pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_readers.py
++++ b/pandas/tests/io/excel/test_readers.py
+@@ -667,7 +667,7 @@ class TestReaders:
+         if read_ext in (".xlsb", ".xls"):
+             pytest.skip(f"No engine for filetype: '{read_ext}'")
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         with pd.option_context("mode.string_storage", string_storage):
+             df = DataFrame(
+--- a/pandas/tests/io/excel/test_style.py
++++ b/pandas/tests/io/excel/test_style.py
+@@ -16,7 +16,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelWriter
+ from pandas.io.formats.excel import ExcelFormatter
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ # jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
+ # could compute styles and render to excel without jinja2, since there is no
+ # 'template' file, but this needs the import error to delayed until render time.
+@@ -41,14 +41,14 @@ def assert_equal_cell_styles(cell1, cell
+ )
+ def test_styler_to_excel_unstyled(engine):
+     # compare DataFrame.to_excel and Styler.to_excel when no styles applied
+-    pytest.importorskip(engine)
++    td.versioned_importorskip(engine)
+     df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
+     with tm.ensure_clean(".xlsx") as path:
+         with ExcelWriter(path, engine=engine) as writer:
+             df.to_excel(writer, sheet_name="dataframe")
+             df.style.to_excel(writer, sheet_name="unstyled")
+-        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
++        openpyxl = td.versioned_importorskip("openpyxl")  # test loading only with openpyxl
+         with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+             for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
+                 assert len(col1) == len(col2)
+@@ -133,7 +133,7 @@ shared_style_params = [
+ )
+ @pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+ def test_styler_to_excel_basic(engine, css, attrs, expected):
+-    pytest.importorskip(engine)
++    td.versioned_importorskip(engine)
+     df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+     styler = df.style.map(lambda x: css)
+@@ -142,7 +142,7 @@ def test_styler_to_excel_basic(engine, c
+             df.to_excel(writer, sheet_name="dataframe")
+             styler.to_excel(writer, sheet_name="styled")
+-        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
++        openpyxl = td.versioned_importorskip("openpyxl")  # test loading only with openpyxl
+         with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+             # test unstyled data cell does not have expected styles
+             # test styled cell has expected styles
+@@ -164,7 +164,7 @@ def test_styler_to_excel_basic(engine, c
+ )
+ @pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+ def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
+-    pytest.importorskip(engine)
++    td.versioned_importorskip(engine)
+     df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+     styler = df.style
+@@ -181,7 +181,7 @@ def test_styler_to_excel_basic_indexes(e
+             null_styler.to_excel(writer, sheet_name="null_styled")
+             styler.to_excel(writer, sheet_name="styled")
+-        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
++        openpyxl = td.versioned_importorskip("openpyxl")  # test loading only with openpyxl
+         with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+             # test null styled index cells does not have expected styles
+             # test styled cell has expected styles
+@@ -233,7 +233,7 @@ def test_styler_to_excel_border_style(en
+     attrs = ["border", "left", "style"]
+     expected = border_style
+-    pytest.importorskip(engine)
++    td.versioned_importorskip(engine)
+     df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+     styler = df.style.map(lambda x: css)
+@@ -242,7 +242,7 @@ def test_styler_to_excel_border_style(en
+             df.to_excel(writer, sheet_name="dataframe")
+             styler.to_excel(writer, sheet_name="styled")
+-        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
++        openpyxl = td.versioned_importorskip("openpyxl")  # test loading only with openpyxl
+         with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+             # test unstyled data cell does not have expected styles
+             # test styled cell has expected styles
+@@ -259,7 +259,7 @@ def test_styler_to_excel_border_style(en
+ def test_styler_custom_converter():
+-    openpyxl = pytest.importorskip("openpyxl")
++    openpyxl = td.versioned_importorskip("openpyxl")
+     def custom_converter(css):
+         return {"font": {"color": {"rgb": "111222"}}}
+--- a/pandas/tests/io/excel/test_xlrd.py
++++ b/pandas/tests/io/excel/test_xlrd.py
+@@ -3,6 +3,7 @@ import io
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+ import pandas as pd
+@@ -11,7 +12,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelFile
+ from pandas.io.excel._base import inspect_excel_format
+-xlrd = pytest.importorskip("xlrd")
++xlrd = td.versioned_importorskip("xlrd")
+ if is_platform_windows():
+     pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_xlsxwriter.py
++++ b/pandas/tests/io/excel/test_xlsxwriter.py
+@@ -2,6 +2,7 @@ import contextlib
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+ from pandas import DataFrame
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelWriter
+-xlsxwriter = pytest.importorskip("xlsxwriter")
++xlsxwriter = td.versioned_importorskip("xlsxwriter")
+ if is_platform_windows():
+     pytestmark = pytest.mark.single_cpu
+@@ -23,7 +24,7 @@ def ext():
+ def test_column_format(ext):
+     # Test that column formats are applied to cells. Test for issue #9167.
+     # Applicable to xlsxwriter only.
+-    openpyxl = pytest.importorskip("openpyxl")
++    openpyxl = td.versioned_importorskip("openpyxl")
+     with tm.ensure_clean(ext) as path:
+         frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
+--- a/pandas/tests/io/formats/style/test_bar.py
++++ b/pandas/tests/io/formats/style/test_bar.py
+@@ -3,13 +3,14 @@ import io
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     NA,
+     DataFrame,
+     read_csv,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ def bar_grad(a=None, b=None, c=None, d=None):
+--- a/pandas/tests/io/formats/style/test_exceptions.py
++++ b/pandas/tests/io/formats/style/test_exceptions.py
+@@ -1,6 +1,7 @@
+ import pytest
+-jinja2 = pytest.importorskip("jinja2")
++import pandas.util._test_decorators as td
++jinja2 = td.versioned_importorskip("jinja2")
+ from pandas import (
+     DataFrame,
+--- a/pandas/tests/io/formats/style/test_format.py
++++ b/pandas/tests/io/formats/style/test_format.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     NA,
+     DataFrame,
+@@ -11,7 +12,7 @@ from pandas import (
+     option_context,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+ from pandas.io.formats.style_render import _str_escape
+--- a/pandas/tests/io/formats/style/test_highlight.py
++++ b/pandas/tests/io/formats/style/test_highlight.py
+@@ -1,13 +1,14 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     NA,
+     DataFrame,
+     IndexSlice,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+--- a/pandas/tests/io/formats/style/test_html.py
++++ b/pandas/tests/io/formats/style/test_html.py
+@@ -6,13 +6,14 @@ from textwrap import (
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     MultiIndex,
+     option_context,
+ )
+-jinja2 = pytest.importorskip("jinja2")
++jinja2 = td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+--- a/pandas/tests/io/formats/style/test_matplotlib.py
++++ b/pandas/tests/io/formats/style/test_matplotlib.py
+@@ -3,14 +3,15 @@ import gc
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     IndexSlice,
+     Series,
+ )
+-pytest.importorskip("matplotlib")
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("matplotlib")
++td.versioned_importorskip("jinja2")
+ import matplotlib as mpl
+@@ -23,9 +24,9 @@ def mpl_cleanup():
+     # 1) Resets units registry
+     # 2) Resets rc_context
+     # 3) Closes all figures
+-    mpl = pytest.importorskip("matplotlib")
+-    mpl_units = pytest.importorskip("matplotlib.units")
+-    plt = pytest.importorskip("matplotlib.pyplot")
++    mpl = td.versioned_importorskip("matplotlib")
++    mpl_units = td.versioned_importorskip("matplotlib.units")
++    plt = td.versioned_importorskip("matplotlib.pyplot")
+     orig_units_registry = mpl_units.registry.copy()
+     with mpl.rc_context():
+         mpl.use("template")
+--- a/pandas/tests/io/formats/style/test_non_unique.py
++++ b/pandas/tests/io/formats/style/test_non_unique.py
+@@ -2,12 +2,13 @@ from textwrap import dedent
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     IndexSlice,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+--- a/pandas/tests/io/formats/style/test_style.py
++++ b/pandas/tests/io/formats/style/test_style.py
+@@ -16,7 +16,7 @@ from pandas import (
+ import pandas._testing as tm
+ import pandas.util._test_decorators as td
+-jinja2 = pytest.importorskip("jinja2")
++jinja2 = td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import (  # isort:skip
+     Styler,
+ )
+--- a/pandas/tests/io/formats/style/test_to_latex.py
++++ b/pandas/tests/io/formats/style/test_to_latex.py
+@@ -3,6 +3,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     MultiIndex,
+@@ -10,7 +11,7 @@ from pandas import (
+     option_context,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+ from pandas.io.formats.style_render import (
+     _parse_latex_cell_styles,
+--- a/pandas/tests/io/formats/style/test_to_string.py
++++ b/pandas/tests/io/formats/style/test_to_string.py
+@@ -2,12 +2,13 @@ from textwrap import dedent
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+ )
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+--- a/pandas/tests/io/formats/style/test_tooltip.py
++++ b/pandas/tests/io/formats/style/test_tooltip.py
+@@ -1,9 +1,10 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+--- a/pandas/tests/io/formats/test_format.py
++++ b/pandas/tests/io/formats/test_format.py
+@@ -11,6 +11,7 @@ from shutil import get_terminal_size
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ import pandas as pd
+@@ -2268,7 +2269,7 @@ def test_filepath_or_buffer_arg(
+ ):
+     df = DataFrame([data])
+     if method in ["to_latex"]:  # uses styler implementation
+-        pytest.importorskip("jinja2")
++        td.versioned_importorskip("jinja2")
+     if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
+         with pytest.raises(
+@@ -2287,7 +2288,7 @@ def test_filepath_or_buffer_arg(
+ @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+ def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+     if method in ["to_latex"]:  # uses styler implementation
+-        pytest.importorskip("jinja2")
++        td.versioned_importorskip("jinja2")
+     msg = "buf is not a file name and it has no write method"
+     with pytest.raises(TypeError, match=msg):
+         getattr(float_frame, method)(buf=object())
+--- a/pandas/tests/io/formats/test_to_excel.py
++++ b/pandas/tests/io/formats/test_to_excel.py
+@@ -6,6 +6,7 @@ import string
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import CSSWarning
+ import pandas._testing as tm
+@@ -336,7 +337,7 @@ def tests_css_named_colors_valid():
+ def test_css_named_colors_from_mpl_present():
+-    mpl_colors = pytest.importorskip("matplotlib.colors")
++    mpl_colors = td.versioned_importorskip("matplotlib.colors")
+     pd_colors = CSSToExcelConverter.NAMED_COLORS
+     for name, color in mpl_colors.CSS4_COLORS.items():
+--- a/pandas/tests/io/formats/test_to_latex.py
++++ b/pandas/tests/io/formats/test_to_latex.py
+@@ -4,6 +4,7 @@ from textwrap import dedent
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -11,7 +12,7 @@ from pandas import (
+ )
+ import pandas._testing as tm
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ def _dedent(string):
+--- a/pandas/tests/io/formats/test_to_markdown.py
++++ b/pandas/tests/io/formats/test_to_markdown.py
+@@ -5,10 +5,11 @@ from io import (
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+-pytest.importorskip("tabulate")
++td.versioned_importorskip("tabulate")
+ def test_simple():
+--- a/pandas/tests/io/formats/test_to_string.py
++++ b/pandas/tests/io/formats/test_to_string.py
+@@ -10,6 +10,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ from pandas import (
+@@ -748,7 +749,7 @@ class TestDataFrameToString:
+     def test_to_string_string_dtype(self):
+         # GH#50099
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = DataFrame(
+             {"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]}
+         )
+--- a/pandas/tests/io/json/test_pandas.py
++++ b/pandas/tests/io/json/test_pandas.py
+@@ -2034,7 +2034,7 @@ class TestPandasContainer:
+         self, string_storage, dtype_backend, orient, using_infer_string
+     ):
+         # GH#50750
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         df = DataFrame(
+             {
+                 "a": Series([1, np.nan, 3], dtype="Int64"),
+@@ -2056,7 +2056,7 @@ class TestPandasContainer:
+             string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+         elif dtype_backend == "pyarrow":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+             string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+@@ -2103,7 +2103,7 @@ class TestPandasContainer:
+     @pytest.mark.parametrize("orient", ["split", "records", "index"])
+     def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
+         # GH#50750
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         ser = Series([1, np.nan, 3], dtype="Int64")
+         out = ser.to_json(orient=orient)
+@@ -2147,7 +2147,7 @@ def test_pyarrow_engine_lines_false():
+ def test_json_roundtrip_string_inference(orient):
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
+     )
+--- a/pandas/tests/io/json/test_readlines.py
++++ b/pandas/tests/io/json/test_readlines.py
+@@ -5,6 +5,7 @@ from pathlib import Path
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -28,7 +29,7 @@ def lines_json_df():
+ @pytest.fixture(params=["ujson", "pyarrow"])
+ def engine(request):
+     if request.param == "pyarrow":
+-        pytest.importorskip("pyarrow.json")
++        td.versioned_importorskip("pyarrow.json")
+     return request.param
+--- a/pandas/tests/io/parser/conftest.py
++++ b/pandas/tests/io/parser/conftest.py
+@@ -4,6 +4,7 @@ import os
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import VERSIONS
+ from pandas import (
+@@ -135,7 +136,7 @@ def all_parsers(request):
+     """
+     parser = request.param()
+     if parser.engine == "pyarrow":
+-        pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
++        td.versioned_importorskip("pyarrow", VERSIONS["pyarrow"])
+         # Try finding a way to disable threads all together
+         # for more stable CI runs
+         import pyarrow
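The all_parsers fixture hunk above also shows the positional form: as with pytest.importorskip, the helper's second argument is a minimum version, here looked up from pandas' own table of declared minima rather than hard-coded. A short usage sketch (the VERSIONS lookup is existing pandas API; the helper is the one added by this patch):

    import pandas.util._test_decorators as td
    from pandas.compat._optional import VERSIONS

    # Skip unless pyarrow is importable at pandas' declared minimum version;
    # mirrors the patched all_parsers fixture above.
    pyarrow = td.versioned_importorskip("pyarrow", VERSIONS["pyarrow"])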
+--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
++++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+@@ -8,6 +8,7 @@ from io import StringIO
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import ParserWarning
+ import pandas as pd
+@@ -460,7 +461,7 @@ def test_dtype_backend_and_dtype(all_par
+ def test_dtype_backend_string(all_parsers, string_storage):
+     # GH#36712
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     with pd.option_context("mode.string_storage", string_storage):
+         parser = all_parsers
+@@ -503,7 +504,7 @@ def test_dtype_backend_ea_dtype_specifie
+ def test_dtype_backend_pyarrow(all_parsers, request):
+     # GH#36712
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     parser = all_parsers
+     data = """a,b,c,d,e,f,g,h,i,j
+@@ -556,7 +557,7 @@ def test_ea_int_avoid_overflow(all_parse
+ def test_string_inference(all_parsers):
+     # GH#54430
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     dtype = "string[pyarrow_numpy]"
+     data = """a,b
+@@ -577,7 +578,7 @@ y,2
+ @pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])
+ def test_string_inference_object_dtype(all_parsers, dtype):
+     # GH#56047
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     data = """a,b
+ x,a
+--- a/pandas/tests/io/parser/test_concatenate_chunks.py
++++ b/pandas/tests/io/parser/test_concatenate_chunks.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import DtypeWarning
+ import pandas._testing as tm
+@@ -11,7 +12,7 @@ from pandas.io.parsers.c_parser_wrapper
+ def test_concatenate_chunks_pyarrow():
+     # GH#51876
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     chunks = [
+         {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+         {0: ArrowExtensionArray(pa.array([1, 2]))},
+@@ -23,7 +24,7 @@ def test_concatenate_chunks_pyarrow():
+ def test_concatenate_chunks_pyarrow_strings():
+     # GH#51876
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     chunks = [
+         {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+         {0: ArrowExtensionArray(pa.array(["a", "b"]))},
+--- a/pandas/tests/io/parser/test_network.py
++++ b/pandas/tests/io/parser/test_network.py
+@@ -80,7 +80,7 @@ class TestS3:
+     def test_parse_public_s3_bucket(self, s3_public_bucket_with_data, tips_df, s3so):
+         # more of an integration test due to the not-public contents portion
+         # can probably mock this though.
+-        pytest.importorskip("s3fs")
++        td.versioned_importorskip("s3fs")
+         for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
+             df = read_csv(
+                 f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,
+@@ -93,7 +93,7 @@ class TestS3:
+     def test_parse_private_s3_bucket(self, s3_private_bucket_with_data, tips_df, s3so):
+         # Read public file from bucket with not-public contents
+-        pytest.importorskip("s3fs")
++        td.versioned_importorskip("s3fs")
+         df = read_csv(
+             f"s3://{s3_private_bucket_with_data.name}/tips.csv", storage_options=s3so
+         )
+@@ -258,7 +258,7 @@ class TestS3:
+     def test_write_s3_parquet_fails(self, tips_df, s3so):
+         # GH 27679
+         # Attempting to write to an invalid S3 path should raise
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         import botocore
+         # GH 34087
+@@ -318,7 +318,7 @@ class TestS3:
+         self, s3_public_bucket_with_data, feather_file, s3so
+     ):
+         # GH 29055
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = read_feather(feather_file)
+         res = read_feather(
+             f"s3://{s3_public_bucket_with_data.name}/simple_dataset.feather",
+--- a/pandas/tests/io/parser/test_python_parser_only.py
++++ b/pandas/tests/io/parser/test_python_parser_only.py
+@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import (
+     ParserError,
+     ParserWarning,
+@@ -167,7 +168,7 @@ def test_decompression_regex_sep(python_
+     data = data.replace(b",", b"::")
+     expected = parser.read_csv(csv1)
+-    module = pytest.importorskip(compression)
++    module = td.versioned_importorskip(compression)
+     klass = getattr(module, klass)
+     with tm.ensure_clean() as path:
+--- a/pandas/tests/io/parser/test_read_fwf.py
++++ b/pandas/tests/io/parser/test_read_fwf.py
+@@ -14,6 +14,7 @@ from pathlib import Path
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import EmptyDataError
+ import pandas as pd
+@@ -972,13 +973,13 @@ def test_dtype_backend(string_storage, d
+         arr = StringArray(np.array(["a", "b"], dtype=np.object_))
+         arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
+     elif dtype_backend == "pyarrow":
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         from pandas.arrays import ArrowExtensionArray
+         arr = ArrowExtensionArray(pa.array(["a", "b"]))
+         arr_na = ArrowExtensionArray(pa.array([None, "a"]))
+     else:
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         arr = ArrowStringArray(pa.array(["a", "b"]))
+         arr_na = ArrowStringArray(pa.array([None, "a"]))
+@@ -1002,7 +1003,7 @@ def test_dtype_backend(string_storage, d
+         }
+     )
+     if dtype_backend == "pyarrow":
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         from pandas.arrays import ArrowExtensionArray
+         expected = DataFrame(
+--- a/pandas/tests/io/parser/test_upcast.py
++++ b/pandas/tests/io/parser/test_upcast.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs.parsers import (
+     _maybe_upcast,
+     na_values,
+@@ -87,7 +88,7 @@ def test_maybe_upcaste_all_nan():
+ @pytest.mark.parametrize("val", [na_values[np.object_], "c"])
+ def test_maybe_upcast_object(val, string_storage):
+     # GH#36712
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     with pd.option_context("mode.string_storage", string_storage):
+         arr = np.array(["a", "b", val], dtype=np.object_)
+--- a/pandas/tests/io/pytables/common.py
++++ b/pandas/tests/io/pytables/common.py
+@@ -5,9 +5,10 @@ import tempfile
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.io.pytables import HDFStore
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+ # set these parameters so we don't have file sharing
+ tables.parameters.MAX_NUMEXPR_THREADS = 1
+ tables.parameters.MAX_BLOSC_THREADS = 1
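In pytables/common.py (above) the gate sits at module scope, a form that recurs in the files that follow: when PyTables is absent (or, presumably, too old), the whole module is skipped at collection time instead of each test failing individually, and the returned module object is then safe to configure. A minimal sketch of that pattern, reusing names from the hunk above:

    # Module-scope gate (sketch): pytest skips collection of every test in
    # the file when "tables" is unavailable; on success the helper returns
    # the imported module, which the patched common.py then configures.
    import pandas.util._test_decorators as td

    tables = td.versioned_importorskip("tables")
    tables.parameters.MAX_NUMEXPR_THREADS = 1  # avoid file sharing, per the source comment
    tables.parameters.MAX_BLOSC_THREADS = 1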
+--- a/pandas/tests/io/pytables/test_append.py
++++ b/pandas/tests/io/pytables/test_append.py
+@@ -29,7 +29,7 @@ is_crashing_arch=bool((platform.uname()[
+ pytestmark = pytest.mark.single_cpu
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+ @pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
+--- a/pandas/tests/io/pytables/test_compat.py
++++ b/pandas/tests/io/pytables/test_compat.py
+@@ -1,9 +1,10 @@
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+ @pytest.fixture
+--- a/pandas/tests/io/pytables/test_read.py
++++ b/pandas/tests/io/pytables/test_read.py
+@@ -401,7 +401,7 @@ def test_read_py2_hdf_file_in_py3(datapa
+ def test_read_infer_string(tmp_path, setup_path):
+     # GH#54431
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": ["a", "b", None]})
+     path = tmp_path / setup_path
+     df.to_hdf(path, key="data", format="table")
+--- a/pandas/tests/io/pytables/test_round_trip.py
++++ b/pandas/tests/io/pytables/test_round_trip.py
+@@ -565,7 +565,7 @@ def test_round_trip_equals(tmp_path, set
+ def test_infer_string_columns(tmp_path, setup_path):
+     # GH#
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     path = tmp_path / setup_path
+     with pd.option_context("future.infer_string", True):
+         df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
+--- a/pandas/tests/io/pytables/test_store.py
++++ b/pandas/tests/io/pytables/test_store.py
+@@ -7,6 +7,7 @@ import time
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -37,7 +38,7 @@ is_crashing_arch=bool((platform.uname()[
+ pytestmark = pytest.mark.single_cpu
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+ def test_context(setup_path):
+--- a/pandas/tests/io/pytables/test_subclass.py
++++ b/pandas/tests/io/pytables/test_subclass.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -12,7 +13,7 @@ from pandas.io.pytables import (
+     read_hdf,
+ )
+-pytest.importorskip("tables")
++td.versioned_importorskip("tables")
+ class TestHDFStoreSubclass:
+--- a/pandas/tests/io/test_clipboard.py
++++ b/pandas/tests/io/test_clipboard.py
+@@ -3,6 +3,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.errors import (
+     PyperclipException,
+     PyperclipWindowsException,
+@@ -353,14 +354,14 @@ class TestClipboard:
+     ):
+         # GH#50502
+         if string_storage == "pyarrow" or dtype_backend == "pyarrow":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+         if string_storage == "python":
+             string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
+             string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+         elif dtype_backend == "pyarrow" and engine != "c":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+             string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+--- a/pandas/tests/io/test_common.py
++++ b/pandas/tests/io/test_common.py
+@@ -100,7 +100,7 @@ bar2,12,13,14,15
+     def test_stringify_file_and_path_like(self):
+         # GH 38125: do not stringify file objects that are also path-like
+-        fsspec = pytest.importorskip("fsspec")
++        fsspec = td.versioned_importorskip("fsspec")
+         with tm.ensure_clean() as path:
+             with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
+                 assert fsspec_obj == icom.stringify_path(fsspec_obj)
+@@ -153,7 +153,7 @@ Look,a snake,🐍"""
+     # Test that pyarrow can handle a file opened with get_handle
+     def test_get_handle_pyarrow_compat(self):
+-        pa_csv = pytest.importorskip("pyarrow.csv")
++        pa_csv = td.versioned_importorskip("pyarrow.csv")
+         # Test latin1, ucs-2, and ucs-4 chars
+         data = """a,b,c
+@@ -196,7 +196,7 @@ Look,a snake,🐍"""
+         ],
+     )
+     def test_read_non_existent(self, reader, module, error_class, fn_ext):
+-        pytest.importorskip(module)
++        td.versioned_importorskip(module)
+         path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
+         msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
+@@ -234,7 +234,7 @@ Look,a snake,🐍"""
+     )
+     # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
+     def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
+-        pytest.importorskip(module)
++        td.versioned_importorskip(module)
+         dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})
+@@ -264,7 +264,7 @@ Look,a snake,🐍"""
+     def test_read_expands_user_home_dir(
+         self, reader, module, error_class, fn_ext, monkeypatch
+     ):
+-        pytest.importorskip(module)
++        td.versioned_importorskip(module)
+         path = os.path.join("~", "does_not_exist." + fn_ext)
+         monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
+@@ -321,7 +321,7 @@ Look,a snake,🐍"""
+         ],
+     )
+     def test_read_fspath_all(self, reader, module, path, datapath):
+-        pytest.importorskip(module)
++        td.versioned_importorskip(module)
+         path = datapath(*path)
+         mypath = CustomFSPath(path)
+@@ -349,13 +349,13 @@ Look,a snake,🐍"""
+     )
+     def test_write_fspath_all(self, writer_name, writer_kwargs, module):
+         if writer_name in ["to_latex"]:  # uses Styler implementation
+-            pytest.importorskip("jinja2")
++            td.versioned_importorskip("jinja2")
+         p1 = tm.ensure_clean("string")
+         p2 = tm.ensure_clean("fspath")
+         df = pd.DataFrame({"A": [1, 2]})
+         with p1 as string, p2 as fspath:
+-            pytest.importorskip(module)
++            td.versioned_importorskip(module)
+             mypath = CustomFSPath(fspath)
+             writer = getattr(df, writer_name)
+@@ -377,7 +377,7 @@ Look,a snake,🐍"""
+         # Same test as write_fspath_all, except HDF5 files aren't
+         # necessarily byte-for-byte identical for a given dataframe, so we'll
+         # have to read and compare equality
+-        pytest.importorskip("tables")
++        td.versioned_importorskip("tables")
+         df = pd.DataFrame({"A": [1, 2]})
+         p1 = tm.ensure_clean("string")
+--- a/pandas/tests/io/test_feather.py
++++ b/pandas/tests/io/test_feather.py
+@@ -2,6 +2,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.core.arrays import (
+@@ -15,7 +16,7 @@ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ @pytest.mark.single_cpu
+--- a/pandas/tests/io/test_fsspec.py
++++ b/pandas/tests/io/test_fsspec.py
+@@ -25,7 +25,7 @@ pytestmark = pytest.mark.filterwarnings(
+ @pytest.fixture
+ def fsspectest():
+-    pytest.importorskip("fsspec")
++    td.versioned_importorskip("fsspec")
+     from fsspec import register_implementation
+     from fsspec.implementations.memory import MemoryFileSystem
+     from fsspec.registry import _registry as registry
+@@ -59,7 +59,7 @@ def df1():
+ @pytest.fixture
+ def cleared_fs():
+-    fsspec = pytest.importorskip("fsspec")
++    fsspec = td.versioned_importorskip("fsspec")
+     memfs = fsspec.filesystem("memory")
+     yield memfs
+@@ -99,7 +99,7 @@ def test_to_csv(cleared_fs, df1):
+ def test_to_excel(cleared_fs, df1):
+-    pytest.importorskip("openpyxl")
++    td.versioned_importorskip("openpyxl")
+     ext = "xlsx"
+     path = f"memory://test/test.{ext}"
+     df1.to_excel(path, index=True)
+@@ -111,7 +111,7 @@ def test_to_excel(cleared_fs, df1):
+ @pytest.mark.parametrize("binary_mode", [False, True])
+ def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1):
+-    fsspec = pytest.importorskip("fsspec")
++    fsspec = td.versioned_importorskip("fsspec")
+     path = "memory://test/test.csv"
+     mode = "wb" if binary_mode else "w"
+@@ -153,7 +153,7 @@ def test_read_table_options(fsspectest):
+ def test_excel_options(fsspectest):
+-    pytest.importorskip("openpyxl")
++    td.versioned_importorskip("openpyxl")
+     extension = "xlsx"
+     df = DataFrame({"a": [0]})
+@@ -168,7 +168,7 @@ def test_excel_options(fsspectest):
+ def test_to_parquet_new_file(cleared_fs, df1):
+     """Regression test for writing to a not-yet-existent GCS Parquet file."""
+-    pytest.importorskip("fastparquet")
++    td.versioned_importorskip("fastparquet")
+     df1.to_parquet(
+         "memory://test/test.csv", index=True, engine="fastparquet", compression=None
+@@ -177,7 +177,7 @@ def test_to_parquet_new_file(cleared_fs,
+ def test_arrowparquet_options(fsspectest):
+     """Regression test for writing to a not-yet-existent GCS Parquet file."""
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [0]})
+     df.to_parquet(
+         "testmem://test/test.csv",
+@@ -197,7 +197,7 @@ def test_arrowparquet_options(fsspectest
+ @td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) fastparquet
+ def test_fastparquet_options(fsspectest):
+     """Regression test for writing to a not-yet-existent GCS Parquet file."""
+-    pytest.importorskip("fastparquet")
++    td.versioned_importorskip("fastparquet")
+     df = DataFrame({"a": [0]})
+     df.to_parquet(
+@@ -217,7 +217,7 @@ def test_fastparquet_options(fsspectest)
+ @pytest.mark.single_cpu
+ def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so):
+-    pytest.importorskip("s3fs")
++    td.versioned_importorskip("s3fs")
+     tm.assert_equal(
+         read_csv(
+             f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so
+@@ -242,7 +242,7 @@ def test_from_s3_csv(s3_public_bucket_wi
+ @pytest.mark.single_cpu
+ @pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
+ def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so):
+-    pytest.importorskip("s3fs")
++    td.versioned_importorskip("s3fs")
+     tm.assert_equal(
+         read_csv(
+             f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv",
+@@ -255,8 +255,8 @@ def test_s3_protocols(s3_public_bucket_w
+ @pytest.mark.single_cpu
+ @td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) fastparquet
+ def test_s3_parquet(s3_public_bucket, s3so, df1):
+-    pytest.importorskip("fastparquet")
+-    pytest.importorskip("s3fs")
++    td.versioned_importorskip("fastparquet")
++    td.versioned_importorskip("s3fs")
+     fn = f"s3://{s3_public_bucket.name}/test.parquet"
+     df1.to_parquet(
+@@ -274,7 +274,7 @@ def test_not_present_exception():
+ def test_feather_options(fsspectest):
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [0]})
+     df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"})
+     assert fsspectest.test[0] == "feather_write"
+@@ -321,7 +321,7 @@ def test_stata_options(fsspectest):
+ def test_markdown_options(fsspectest):
+-    pytest.importorskip("tabulate")
++    td.versioned_importorskip("tabulate")
+     df = DataFrame({"a": [0]})
+     df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"})
+     assert fsspectest.test[0] == "md_write"
+@@ -329,7 +329,7 @@ def test_markdown_options(fsspectest):
+ def test_non_fsspec_options():
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     with pytest.raises(ValueError, match="storage_options"):
+         read_csv("localfile", storage_options={"a": True})
+     with pytest.raises(ValueError, match="storage_options"):
+--- a/pandas/tests/io/test_gcs.py
++++ b/pandas/tests/io/test_gcs.py
+@@ -29,8 +29,8 @@ pytestmark = pytest.mark.filterwarnings(
+ @pytest.fixture
+ def gcs_buffer():
+     """Emulate GCS using a binary buffer."""
+-    pytest.importorskip("gcsfs")
+-    fsspec = pytest.importorskip("fsspec")
++    td.versioned_importorskip("gcsfs")
++    fsspec = td.versioned_importorskip("fsspec")
+     gcs_buffer = BytesIO()
+     gcs_buffer.close = lambda: True
+@@ -83,8 +83,8 @@ def test_to_read_gcs(gcs_buffer, format,
+         df1.to_json(path)
+         df2 = read_json(path, convert_dates=["dt"])
+     elif format == "parquet":
+-        pytest.importorskip("pyarrow")
+-        pa_fs = pytest.importorskip("pyarrow.fs")
++        td.versioned_importorskip("pyarrow")
++        pa_fs = td.versioned_importorskip("pyarrow.fs")
+         class MockFileSystem(pa_fs.FileSystem):
+             @staticmethod
+@@ -107,7 +107,7 @@ def test_to_read_gcs(gcs_buffer, format,
+         captured = capsys.readouterr()
+         assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n"
+     elif format == "markdown":
+-        pytest.importorskip("tabulate")
++        td.versioned_importorskip("tabulate")
+         df1.to_markdown(path)
+         df2 = df1
+@@ -196,8 +196,8 @@ def test_to_csv_compression_encoding_gcs
+ def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
+     """Regression test for writing to a not-yet-existent GCS Parquet file."""
+-    pytest.importorskip("fastparquet")
+-    pytest.importorskip("gcsfs")
++    td.versioned_importorskip("fastparquet")
++    td.versioned_importorskip("gcsfs")
+     from fsspec import AbstractFileSystem
+--- a/pandas/tests/io/test_html.py
++++ b/pandas/tests/io/test_html.py
+@@ -71,8 +71,8 @@ def assert_framelist_equal(list1, list2,
+ def test_bs4_version_fails(monkeypatch, datapath):
+-    bs4 = pytest.importorskip("bs4")
+-    pytest.importorskip("html5lib")
++    bs4 = td.versioned_importorskip("bs4")
++    td.versioned_importorskip("html5lib")
+     monkeypatch.setattr(bs4, "__version__", "4.2")
+     with pytest.raises(ImportError, match="Pandas requires version"):
+@@ -89,9 +89,9 @@ def test_invalid_flavor():
+ def test_same_ordering(datapath):
+-    pytest.importorskip("bs4")
+-    pytest.importorskip("lxml")
+-    pytest.importorskip("html5lib")
++    td.versioned_importorskip("bs4")
++    td.versioned_importorskip("lxml")
++    td.versioned_importorskip("html5lib")
+     filename = datapath("io", "data", "html", "valid_markup.html")
+     dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
+@@ -184,13 +184,13 @@ class TestReadHtml:
+             string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
+             string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+         elif dtype_backend == "pyarrow":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+             string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+         else:
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+--- a/pandas/tests/io/test_http_headers.py
++++ b/pandas/tests/io/test_http_headers.py
+@@ -161,7 +161,7 @@ def test_to_parquet_to_disk_with_storage
+         "Auth": "other_custom",
+     }
+-    pytest.importorskip(engine)
++    td.versioned_importorskip(engine)
+     true_df = pd.DataFrame({"column_name": ["column_value"]})
+     msg = (
+--- a/pandas/tests/io/test_orc.py
++++ b/pandas/tests/io/test_orc.py
+@@ -8,12 +8,13 @@ import pathlib
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import read_orc
+ import pandas._testing as tm
+ from pandas.core.arrays import StringArray
+-pytest.importorskip("pyarrow.orc")
++td.versioned_importorskip("pyarrow.orc")
+ import pyarrow as pa
+@@ -248,7 +249,7 @@ def test_orc_reader_snappy_compressed(di
+ def test_orc_roundtrip_file(dirpath):
+     # GH44554
+     # PyArrow gained ORC write support with the current argument order
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     data = {
+         "boolean1": np.array([False, True], dtype="bool"),
+@@ -273,7 +274,7 @@ def test_orc_roundtrip_file(dirpath):
+ def test_orc_roundtrip_bytesio():
+     # GH44554
+     # PyArrow gained ORC write support with the current argument order
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     data = {
+         "boolean1": np.array([False, True], dtype="bool"),
+@@ -297,7 +298,7 @@ def test_orc_roundtrip_bytesio():
+ def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
+     # GH44554
+     # PyArrow gained ORC write support with the current argument order
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     msg = "The dtype of one or more columns is not supported yet."
+     with pytest.raises(NotImplementedError, match=msg):
+@@ -305,7 +306,7 @@ def test_orc_writer_dtypes_not_supported
+ def test_orc_dtype_backend_pyarrow():
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = pd.DataFrame(
+         {
+             "string": list("abc"),
+@@ -341,7 +342,7 @@ def test_orc_dtype_backend_pyarrow():
+ def test_orc_dtype_backend_numpy_nullable():
+     # GH#50503
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = pd.DataFrame(
+         {
+             "string": list("abc"),
+--- a/pandas/tests/io/test_parquet.py
++++ b/pandas/tests/io/test_parquet.py
+@@ -8,6 +8,7 @@ import pathlib
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_copy_on_write
+ from pandas._config.config import _get_option
+@@ -389,7 +390,7 @@ class Base:
+     @pytest.mark.single_cpu
+     def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine):
+         if engine != "auto":
+-            pytest.importorskip(engine)
++            td.versioned_importorskip(engine)
+         with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f:
+             httpserver.serve_content(content=f.read())
+             df = read_parquet(httpserver.url)
+@@ -611,7 +612,7 @@ class TestBasic(Base):
+             check_round_trip(df, engine)
+     def test_dtype_backend(self, engine, request):
+-        pq = pytest.importorskip("pyarrow.parquet")
++        pq = td.versioned_importorskip("pyarrow.parquet")
+         if engine == "fastparquet":
+             # We are manually disabling fastparquet's
+@@ -799,7 +800,7 @@ class TestParquetPyArrow(Base):
+     @pytest.mark.single_cpu
+     def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so):
+-        s3fs = pytest.importorskip("s3fs")
++        s3fs = td.versioned_importorskip("s3fs")
+         s3 = s3fs.S3FileSystem(**s3so)
+         kw = {"filesystem": s3}
+         check_round_trip(
+@@ -833,7 +834,7 @@ class TestParquetPyArrow(Base):
+     def test_s3_roundtrip_for_dir(
+         self, df_compat, s3_public_bucket, pa, partition_col, s3so
+     ):
+-        pytest.importorskip("s3fs")
++        td.versioned_importorskip("s3fs")
+         # GH #26388
+         expected_df = df_compat.copy()
+@@ -862,14 +863,14 @@ class TestParquetPyArrow(Base):
+         )
+     def test_read_file_like_obj_support(self, df_compat):
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         buffer = BytesIO()
+         df_compat.to_parquet(buffer)
+         df_from_buf = read_parquet(buffer)
+         tm.assert_frame_equal(df_compat, df_from_buf)
+     def test_expand_user(self, df_compat, monkeypatch):
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         monkeypatch.setenv("HOME", "TestingUser")
+         monkeypatch.setenv("USERPROFILE", "TestingUser")
+         with pytest.raises(OSError, match=r".*TestingUser.*"):
+@@ -924,7 +925,7 @@ class TestParquetPyArrow(Base):
+     def test_additional_extension_arrays(self, pa):
+         # test additional ExtensionArrays that are supported through the
+         # __arrow_array__ protocol
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame(
+             {
+                 "a": pd.Series([1, 2, 3], dtype="Int64"),
+@@ -939,7 +940,7 @@ class TestParquetPyArrow(Base):
+     def test_pyarrow_backed_string_array(self, pa, string_storage):
+         # test ArrowStringArray supported through the __arrow_array__ protocol
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
+         with pd.option_context("string_storage", string_storage):
+             check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))
+@@ -947,7 +948,7 @@ class TestParquetPyArrow(Base):
+     def test_additional_extension_types(self, pa):
+         # test additional ExtensionArrays that are supported through the
+         # __arrow_array__ protocol + by defining a custom ExtensionType
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame(
+             {
+                 "c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]),
+@@ -992,7 +993,7 @@ class TestParquetPyArrow(Base):
+     def test_filter_row_groups(self, pa):
+         # https://github.com/pandas-dev/pandas/issues/26551
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame({"a": list(range(3))})
+         with tm.ensure_clean() as path:
+             df.to_parquet(path, engine=pa)
+@@ -1349,7 +1350,7 @@ class TestParquetFastParquet(Base):
+         tm.assert_frame_equal(result, df)
+     def test_filesystem_notimplemented(self):
+-        pytest.importorskip("fastparquet")
++        td.versioned_importorskip("fastparquet")
+         df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+         with tm.ensure_clean() as path:
+             with pytest.raises(
+@@ -1365,7 +1366,7 @@ class TestParquetFastParquet(Base):
+                 read_parquet(path, engine="fastparquet", filesystem="foo")
+     def test_invalid_filesystem(self):
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+         with tm.ensure_clean() as path:
+             with pytest.raises(
+@@ -1381,7 +1382,7 @@ class TestParquetFastParquet(Base):
+                 read_parquet(path, engine="pyarrow", filesystem="foo")
+     def test_unsupported_pa_filesystem_storage_options(self):
+-        pa_fs = pytest.importorskip("pyarrow.fs")
++        pa_fs = td.versioned_importorskip("pyarrow.fs")
+         df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+         with tm.ensure_clean() as path:
+             with pytest.raises(
+--- a/pandas/tests/io/test_pickle.py
++++ b/pandas/tests/io/test_pickle.py
+@@ -499,7 +499,7 @@ def test_pickle_generalurl_read(monkeypa
+ def test_pickle_fsspec_roundtrip():
+-    pytest.importorskip("fsspec")
++    td.versioned_importorskip("fsspec")
+     with tm.ensure_clean():
+         mockurl = "memory://mockfile"
+         df = DataFrame(
+--- a/pandas/tests/io/test_s3.py
++++ b/pandas/tests/io/test_s3.py
+@@ -2,13 +2,14 @@ from io import BytesIO
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import read_csv
+ def test_streaming_s3_objects():
+     # GH17135
+     # botocore gained iteration support in 1.10.47, can now be used in read_*
+-    pytest.importorskip("botocore", minversion="1.10.47")
++    td.versioned_importorskip("botocore", min_version="1.10.47")
+     from botocore.response import StreamingBody
+     data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"]
+@@ -20,7 +21,7 @@ def test_streaming_s3_objects():
+ @pytest.mark.single_cpu
+ def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
+     # GH 34626
+-    pytest.importorskip("s3fs")
++    td.versioned_importorskip("s3fs")
+     result = read_csv(
+         f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+         nrows=3,
+@@ -33,7 +34,7 @@ def test_read_without_creds_from_pub_buc
+ def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
+     # Ensure we can read from a public bucket with credentials
+     # GH 34626
+-    pytest.importorskip("s3fs")
++    td.versioned_importorskip("s3fs")
+     df = read_csv(
+         f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+         nrows=5,
+--- a/pandas/tests/io/test_spss.py
++++ b/pandas/tests/io/test_spss.py
+@@ -4,11 +4,12 @@ from pathlib import Path
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.util.version import Version
+-pyreadstat = pytest.importorskip("pyreadstat")
++pyreadstat = td.versioned_importorskip("pyreadstat")
+ # TODO(CoW) - detection of chained assignment in cython
+@@ -101,7 +102,7 @@ def test_spss_umlauts_dtype_backend(data
+     expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64")
+     if dtype_backend == "pyarrow":
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         from pandas.arrays import ArrowExtensionArray
+--- a/pandas/tests/io/test_sql.py
++++ b/pandas/tests/io/test_sql.py
+@@ -601,8 +601,8 @@ def drop_view(
+ @pytest.fixture
+ def mysql_pymysql_engine():
+-    sqlalchemy = pytest.importorskip("sqlalchemy")
+-    pymysql = pytest.importorskip("pymysql")
++    sqlalchemy = td.versioned_importorskip("sqlalchemy")
++    pymysql = td.versioned_importorskip("pymysql")
+     engine = sqlalchemy.create_engine(
+         "mysql+pymysql://root@localhost:3306/pandas",
+         connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS},
+@@ -649,8 +649,8 @@ def mysql_pymysql_conn_types(mysql_pymys
+ @pytest.fixture
+ def postgresql_psycopg2_engine():
+-    sqlalchemy = pytest.importorskip("sqlalchemy")
+-    pytest.importorskip("psycopg2")
++    sqlalchemy = td.versioned_importorskip("sqlalchemy")
++    td.versioned_importorskip("psycopg2")
+     engine = sqlalchemy.create_engine(
+         "postgresql+psycopg2://postgres:postgres@localhost:5432/pandas",
+         poolclass=sqlalchemy.pool.NullPool,
+@@ -684,7 +684,7 @@ def postgresql_psycopg2_conn(postgresql_
+ @pytest.fixture
+ def postgresql_adbc_conn():
+-    pytest.importorskip("adbc_driver_postgresql")
++    td.versioned_importorskip("adbc_driver_postgresql")
+     from adbc_driver_postgresql import dbapi
+     uri = "postgresql://postgres:postgres@localhost:5432/pandas"
+@@ -747,14 +747,14 @@ def postgresql_psycopg2_conn_types(postg
+ @pytest.fixture
+ def sqlite_str():
+-    pytest.importorskip("sqlalchemy")
++    td.versioned_importorskip("sqlalchemy")
+     with tm.ensure_clean() as name:
+         yield f"sqlite:///{name}"
+ @pytest.fixture
+ def sqlite_engine(sqlite_str):
+-    sqlalchemy = pytest.importorskip("sqlalchemy")
++    sqlalchemy = td.versioned_importorskip("sqlalchemy")
+     engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool)
+     yield engine
+     for view in get_all_views(engine):
+@@ -772,7 +772,7 @@ def sqlite_conn(sqlite_engine):
+ @pytest.fixture
+ def sqlite_str_iris(sqlite_str, iris_path):
+-    sqlalchemy = pytest.importorskip("sqlalchemy")
++    sqlalchemy = td.versioned_importorskip("sqlalchemy")
+     engine = sqlalchemy.create_engine(sqlite_str)
+     create_and_load_iris(engine, iris_path)
+     create_and_load_iris_view(engine)
+@@ -795,7 +795,7 @@ def sqlite_conn_iris(sqlite_engine_iris)
+ @pytest.fixture
+ def sqlite_str_types(sqlite_str, types_data):
+-    sqlalchemy = pytest.importorskip("sqlalchemy")
++    sqlalchemy = td.versioned_importorskip("sqlalchemy")
+     engine = sqlalchemy.create_engine(sqlite_str)
+     create_and_load_types(engine, types_data, "sqlite")
+     engine.dispose()
+@@ -816,7 +816,7 @@ def sqlite_conn_types(sqlite_engine_type
+ @pytest.fixture
+ def sqlite_adbc_conn():
+-    pytest.importorskip("adbc_driver_sqlite")
++    td.versioned_importorskip("adbc_driver_sqlite")
+     from adbc_driver_sqlite import dbapi
+     with tm.ensure_clean() as name:
+@@ -1001,7 +1001,7 @@ def test_dataframe_to_sql_empty(conn, te
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_dataframe_to_sql_arrow_dtypes(conn, request):
+     # GH 52046
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {
+             "int": pd.array([1], dtype="int8[pyarrow]"),
+@@ -1035,7 +1035,7 @@ def test_dataframe_to_sql_arrow_dtypes(c
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture):
+     # GH 52046
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {
+             "datetime": pd.array(
+@@ -2515,7 +2515,7 @@ def test_sqlalchemy_integer_overload_map
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_database_uri_string(conn, request, test_frame1):
+-    pytest.importorskip("sqlalchemy")
++    td.versioned_importorskip("sqlalchemy")
+     conn = request.getfixturevalue(conn)
+     # Test read_sql and .to_sql method with a database URI (GH10654)
+     # db_uri = 'sqlite:///:memory:' # raises
+@@ -2537,7 +2537,7 @@ def test_database_uri_string(conn, reque
+ @td.skip_if_installed("pg8000")
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_pg8000_sqlalchemy_passthrough_error(conn, request):
+-    pytest.importorskip("sqlalchemy")
++    td.versioned_importorskip("sqlalchemy")
+     conn = request.getfixturevalue(conn)
+     # using driver that will not be installed on CI to trigger error
+     # in sqlalchemy.create_engine -> test passing of this error to user
+@@ -3414,7 +3414,7 @@ def test_to_sql_with_negative_npinf(conn
+         # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
+         # for pymysql version >= 0.10
+         # TODO(GH#36465): remove this version check after GH 36465 is fixed
+-        pymysql = pytest.importorskip("pymysql")
++        pymysql = td.versioned_importorskip("pymysql")
+         if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns:
+             mark = pytest.mark.xfail(reason="GH 36465")
+@@ -3529,7 +3529,7 @@ def test_options_auto(conn, request, tes
+ def test_options_get_engine():
+-    pytest.importorskip("sqlalchemy")
++    td.versioned_importorskip("sqlalchemy")
+     assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine)
+     with pd.option_context("io.sql.engine", "sqlalchemy"):
+@@ -3681,14 +3681,14 @@ def dtype_backend_expected():
+             string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
+         elif dtype_backend == "pyarrow":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+             string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))  # type: ignore[assignment]
+             string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))  # type: ignore[assignment]
+         else:
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+@@ -3705,7 +3705,7 @@ def dtype_backend_expected():
+             }
+         )
+         if dtype_backend == "pyarrow":
+-            pa = pytest.importorskip("pyarrow")
++            pa = td.versioned_importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+@@ -3850,7 +3850,7 @@ def test_row_object_is_named_tuple(sqlit
+ def test_read_sql_string_inference(sqlite_engine):
+     conn = sqlite_engine
+     # GH#54430
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     table = "test"
+     df = DataFrame({"a": ["x", "y"]})
+     df.to_sql(table, con=conn, index=False, if_exists="replace")
+--- a/pandas/tests/io/test_stata.py
++++ b/pandas/tests/io/test_stata.py
+@@ -2045,11 +2045,11 @@ def test_compression(compression, versio
+             with bz2.open(path, "rb") as comp:
+                 fp = io.BytesIO(comp.read())
+         elif compression == "zstd":
+-            zstd = pytest.importorskip("zstandard")
++            zstd = td.versioned_importorskip("zstandard")
+             with zstd.open(path, "rb") as comp:
+                 fp = io.BytesIO(comp.read())
+         elif compression == "xz":
+-            lzma = pytest.importorskip("lzma")
++            lzma = td.versioned_importorskip("lzma")
+             with lzma.open(path, "rb") as comp:
+                 fp = io.BytesIO(comp.read())
+         elif compression is None:
+--- a/pandas/tests/io/xml/test_to_xml.py
++++ b/pandas/tests/io/xml/test_to_xml.py
+@@ -867,7 +867,7 @@ def test_encoding_option_str(xml_baby_na
+ def test_correct_encoding_file(xml_baby_names):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
+     with tm.ensure_clean("test.xml") as path:
+@@ -876,7 +876,7 @@ def test_correct_encoding_file(xml_baby_
+ @pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
+ def test_wrong_encoding_option_lxml(xml_baby_names, parser, encoding):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
+     with tm.ensure_clean("test.xml") as path:
+@@ -892,7 +892,7 @@ def test_misspelled_encoding(parser, geo
+ def test_xml_declaration_pretty_print(geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     expected = """\
+ <data>
+   <row>
+@@ -1005,7 +1005,7 @@ xsl_expected = """\
+ def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with open(
+         xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+     ) as f:
+@@ -1015,7 +1015,7 @@ def test_stylesheet_file_like(xsl_row_fi
+ def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
+     # note: By default the bodies of untyped functions are not checked,
+     # consider using --check-untyped-defs
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
+     with open(
+@@ -1032,7 +1032,7 @@ def test_stylesheet_io(xsl_row_field_out
+ def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with open(
+         xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+     ) as f:
+@@ -1044,7 +1044,7 @@ def test_stylesheet_buffered_reader(xsl_
+ def test_stylesheet_wrong_path(geom_df):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = os.path.join("data", "xml", "row_field_output.xslt")
+@@ -1057,7 +1057,7 @@ def test_stylesheet_wrong_path(geom_df):
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_string_stylesheet(val, geom_df):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     msg = "|".join(
+         [
+@@ -1073,7 +1073,7 @@ def test_empty_string_stylesheet(val, ge
+ def test_incorrect_xsl_syntax(geom_df):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1103,7 +1103,7 @@ def test_incorrect_xsl_syntax(geom_df):
+ def test_incorrect_xsl_eval(geom_df):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1131,7 +1131,7 @@ def test_incorrect_xsl_eval(geom_df):
+ def test_incorrect_xsl_apply(geom_df):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1169,7 +1169,7 @@ def test_stylesheet_with_etree(geom_df):
+ def test_style_to_csv(geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+     <xsl:output method="text" indent="yes" />
+@@ -1198,7 +1198,7 @@ def test_style_to_csv(geom_df):
+ def test_style_to_string(geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+     <xsl:output method="text" indent="yes" />
+@@ -1232,7 +1232,7 @@ def test_style_to_string(geom_df):
+ def test_style_to_json(geom_df):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+     <xsl:output method="text" indent="yes" />
+@@ -1363,8 +1363,8 @@ def test_unsuported_compression(parser,
+ @pytest.mark.single_cpu
+ def test_s3_permission_output(parser, s3_public_bucket, geom_df):
+-    s3fs = pytest.importorskip("s3fs")
+-    pytest.importorskip("lxml")
++    s3fs = td.versioned_importorskip("s3fs")
++    td.versioned_importorskip("lxml")
+     with tm.external_error_raised((PermissionError, FileNotFoundError)):
+         fs = s3fs.S3FileSystem(anon=True)
+--- a/pandas/tests/io/xml/test_xml.py
++++ b/pandas/tests/io/xml/test_xml.py
+@@ -249,7 +249,7 @@ df_kml = DataFrame(
+ def test_literal_xml_deprecation():
+     # GH 53809
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     msg = (
+         "Passing literal xml to 'read_xml' is deprecated and "
+         "will be removed in a future version. To read from a "
+@@ -289,7 +289,7 @@ def read_xml_iterparse_comp(comp_path, c
+ def test_parser_consistency_file(xml_books):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_file_lxml = read_xml(xml_books, parser="lxml")
+     df_file_etree = read_xml(xml_books, parser="etree")
+@@ -462,7 +462,7 @@ def test_file_handle_close(xml_books, pa
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_string_lxml(val):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     msg = "|".join(
+         [
+@@ -505,7 +505,7 @@ def test_wrong_file_path(parser, datapat
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ def test_url(httpserver, xml_file):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with open(xml_file, encoding="utf-8") as f:
+         httpserver.serve_content(content=f.read())
+         df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]")
+@@ -587,7 +587,7 @@ def test_whitespace(parser):
+ def test_empty_xpath_lxml(xml_books):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with pytest.raises(ValueError, match=("xpath does not return any nodes")):
+         read_xml(xml_books, xpath=".//python", parser="lxml")
+@@ -600,7 +600,7 @@ def test_bad_xpath_etree(xml_books):
+ def test_bad_xpath_lxml(xml_books):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     with pytest.raises(lxml_etree.XPathEvalError, match=("Invalid expression")):
+         read_xml(xml_books, xpath=".//[book]", parser="lxml")
+@@ -659,7 +659,7 @@ def test_prefix_namespace(parser):
+ def test_consistency_default_namespace():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_lxml = read_xml(
+         StringIO(xml_default_nmsp),
+         xpath=".//ns:row",
+@@ -678,7 +678,7 @@ def test_consistency_default_namespace()
+ def test_consistency_prefix_namespace():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_lxml = read_xml(
+         StringIO(xml_prefix_nmsp),
+         xpath=".//doc:row",
+@@ -710,7 +710,7 @@ def test_missing_prefix_definition_etree
+ def test_missing_prefix_definition_lxml(kml_cta_rail_lines):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     with pytest.raises(lxml_etree.XPathEvalError, match=("Undefined namespace prefix")):
+         read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml")
+@@ -718,7 +718,7 @@ def test_missing_prefix_definition_lxml(
+ @pytest.mark.parametrize("key", ["", None])
+ def test_none_namespace_prefix(key):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with pytest.raises(
+         TypeError, match=("empty namespace prefix is not supported in XPath")
+     ):
+@@ -831,7 +831,7 @@ def test_empty_elems_only(parser):
+ def test_attribute_centric_xml():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xml = """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <TrainSchedule>
+@@ -1061,7 +1061,7 @@ def test_ascii_encoding(xml_baby_names,
+ def test_parser_consistency_with_encoding(xml_baby_names):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_xpath_lxml = read_xml(xml_baby_names, parser="lxml", encoding="ISO-8859-1")
+     df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1")
+@@ -1084,7 +1084,7 @@ def test_parser_consistency_with_encodin
+ def test_wrong_encoding_for_lxml():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     # GH#45133
+     data = """<data>
+   <row>
+@@ -1131,7 +1131,7 @@ def test_wrong_parser(xml_books):
+ def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     df_style = read_xml(
+         kml_cta_rail_lines,
+         xpath=".//k:Placemark",
+@@ -1158,7 +1158,7 @@ def test_stylesheet_file(kml_cta_rail_li
+ def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+         df_style = read_xml(
+             kml_cta_rail_lines,
+@@ -1173,7 +1173,7 @@ def test_stylesheet_file_like(kml_cta_ra
+ def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode):
+     # note: By default the bodies of untyped functions are not checked,
+     # consider using --check-untyped-defs
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
+     with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+@@ -1193,7 +1193,7 @@ def test_stylesheet_io(kml_cta_rail_line
+ def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+         xsl_obj = f.read()
+@@ -1208,7 +1208,7 @@ def test_stylesheet_buffered_reader(kml_
+ def test_style_charset():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xml = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
+     xsl = """\
+@@ -1237,7 +1237,7 @@ def test_style_charset():
+ def test_not_stylesheet(kml_cta_rail_lines, xml_books):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     with pytest.raises(
+         lxml_etree.XSLTParseError, match=("document is not a stylesheet")
+@@ -1246,7 +1246,7 @@ def test_not_stylesheet(kml_cta_rail_lin
+ def test_incorrect_xsl_syntax(kml_cta_rail_lines):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+@@ -1275,7 +1275,7 @@ def test_incorrect_xsl_syntax(kml_cta_ra
+ def test_incorrect_xsl_eval(kml_cta_rail_lines):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+@@ -1302,7 +1302,7 @@ def test_incorrect_xsl_eval(kml_cta_rail
+ def test_incorrect_xsl_apply(kml_cta_rail_lines):
+-    lxml_etree = pytest.importorskip("lxml.etree")
++    lxml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1321,7 +1321,7 @@ def test_incorrect_xsl_apply(kml_cta_rai
+ def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path):
+-    xml_etree = pytest.importorskip("lxml.etree")
++    xml_etree = td.versioned_importorskip("lxml.etree")
+     xsl = xml_data_path / "flatten.xsl"
+@@ -1335,7 +1335,7 @@ def test_wrong_stylesheet(kml_cta_rail_l
+ def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode):
+     # note: By default the bodies of untyped functions are not checked,
+     # consider using --check-untyped-defs
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xsl_obj: BytesIO | StringIO  # type: ignore[annotation-unchecked]
+     with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+@@ -1350,7 +1350,7 @@ def test_stylesheet_file_close(kml_cta_r
+ def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     with pytest.raises(
+         ValueError, match=("To use stylesheet, you need lxml installed")
+     ):
+@@ -1359,7 +1359,7 @@ def test_stylesheet_with_etree(kml_cta_r
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_stylesheet(val, datapath):
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     msg = (
+         "Passing literal xml to 'read_xml' is deprecated and "
+         "will be removed in a future version. To read from a "
+@@ -1662,7 +1662,7 @@ def test_empty_data(xml_books, parser):
+ def test_online_stylesheet():
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("lxml")
+     xml = """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <catalog>
+@@ -1993,8 +1993,8 @@ def test_unsuported_compression(parser):
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ def test_s3_parser_consistency(s3_public_bucket_with_data, s3so):
+-    pytest.importorskip("s3fs")
+-    pytest.importorskip("lxml")
++    td.versioned_importorskip("s3fs")
++    td.versioned_importorskip("lxml")
+     s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml"
+     df_lxml = read_xml(s3, parser="lxml", storage_options=s3so)
+@@ -2035,7 +2035,7 @@ def test_read_xml_nullable_dtypes(
+ </data>"""
+     if using_infer_string:
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"]))
+         string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None]))
+@@ -2044,14 +2044,14 @@ def test_read_xml_nullable_dtypes(
+         string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+     elif dtype_backend == "pyarrow":
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         from pandas.arrays import ArrowExtensionArray
+         string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+         string_array_na = ArrowExtensionArray(pa.array(["x", None]))
+     else:
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         string_array = ArrowStringArray(pa.array(["x", "y"]))
+         string_array_na = ArrowStringArray(pa.array(["x", None]))
+@@ -2073,7 +2073,7 @@ def test_read_xml_nullable_dtypes(
+     )
+     if dtype_backend == "pyarrow":
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         from pandas.arrays import ArrowExtensionArray
+         expected = DataFrame(
+--- a/pandas/tests/plotting/conftest.py
++++ b/pandas/tests/plotting/conftest.py
+@@ -3,6 +3,7 @@ import gc
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     to_datetime,
+@@ -15,9 +16,9 @@ def mpl_cleanup():
+     # 1) Resets units registry
+     # 2) Resets rc_context
+     # 3) Closes all figures
+-    mpl = pytest.importorskip("matplotlib")
+-    mpl_units = pytest.importorskip("matplotlib.units")
+-    plt = pytest.importorskip("matplotlib.pyplot")
++    mpl = td.versioned_importorskip("matplotlib")
++    mpl_units = td.versioned_importorskip("matplotlib.units")
++    plt = td.versioned_importorskip("matplotlib.pyplot")
+     orig_units_registry = mpl_units.registry.copy()
+     with mpl.rc_context():
+         mpl.use("template")
+--- a/pandas/tests/plotting/frame/test_frame.py
++++ b/pandas/tests/plotting/frame/test_frame.py
+@@ -48,8 +48,8 @@ from pandas.util.version import Version
+ from pandas.io.formats.printing import pprint_thing
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+ class TestDataFramePlots:
+@@ -1119,7 +1119,7 @@ class TestDataFramePlots:
+         _check_box_return_type(result, return_type)
+     def test_kde_df(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
+         ax = _check_plot_works(df.plot, kind="kde")
+         expected = [pprint_thing(c) for c in df.columns]
+@@ -1127,13 +1127,13 @@ class TestDataFramePlots:
+         _check_ticks_props(ax, xrot=0)
+     def test_kde_df_rot(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+         ax = df.plot(kind="kde", rot=20, fontsize=5)
+         _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
+     def test_kde_df_subplots(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+         axes = _check_plot_works(
+             df.plot,
+@@ -1144,13 +1144,13 @@ class TestDataFramePlots:
+         _check_axes_shape(axes, axes_num=4, layout=(4, 1))
+     def test_kde_df_logy(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+         axes = df.plot(kind="kde", logy=True, subplots=True)
+         _check_ax_scales(axes, yaxis="log")
+     def test_kde_missing_vals(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4)))
+         df.loc[0, 0] = np.nan
+         _check_plot_works(df.plot, kind="kde")
+@@ -1447,14 +1447,14 @@ class TestDataFramePlots:
+     @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+     def test_kind_both_ways(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame({"x": [1, 2, 3]})
+         df.plot(kind=kind)
+         getattr(df.plot, kind)()
+     @pytest.mark.parametrize("kind", ["scatter", "hexbin"])
+     def test_kind_both_ways_x_y(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame({"x": [1, 2, 3]})
+         df.plot("x", "x", kind=kind)
+         getattr(df.plot, kind)("x", "x")
+@@ -2100,7 +2100,7 @@ class TestDataFramePlots:
+     @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
+     def test_memory_leak(self, kind):
+         """Check that every plot type gets properly collected."""
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         args = {}
+         if kind in ["hexbin", "scatter", "pie"]:
+             df = DataFrame(
+@@ -2427,7 +2427,7 @@ class TestDataFramePlots:
+         "kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie")
+     )
+     def test_group_subplot(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         d = {
+             "a": np.arange(10),
+             "b": np.arange(10) + 1,
+--- a/pandas/tests/plotting/frame/test_frame_color.py
++++ b/pandas/tests/plotting/frame/test_frame_color.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import DataFrame
+ import pandas._testing as tm
+@@ -14,9 +15,9 @@ from pandas.tests.plotting.common import
+ )
+ from pandas.util.version import Version
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
+-cm = pytest.importorskip("matplotlib.cm")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
++cm = td.versioned_importorskip("matplotlib.cm")
+ def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
+@@ -446,7 +447,7 @@ class TestDataFrameColor:
+         _check_colors(ax.patches[::10], facecolors=["green"] * 5)
+     def test_kde_colors(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         custom_colors = "rgcby"
+         df = DataFrame(np.random.default_rng(2).random((5, 5)))
+@@ -455,14 +456,14 @@ class TestDataFrameColor:
+     @pytest.mark.parametrize("colormap", ["jet", cm.jet])
+     def test_kde_colors_cmap(self, colormap):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         ax = df.plot.kde(colormap=colormap)
+         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+         _check_colors(ax.get_lines(), linecolors=rgba_colors)
+     def test_kde_colors_and_styles_subplots(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         default_colors = _unpack_cycler(mpl.pyplot.rcParams)
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+@@ -473,14 +474,14 @@ class TestDataFrameColor:
+     @pytest.mark.parametrize("colormap", ["k", "red"])
+     def test_kde_colors_and_styles_subplots_single_col_str(self, colormap):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         axes = df.plot(kind="kde", color=colormap, subplots=True)
+         for ax in axes:
+             _check_colors(ax.get_lines(), linecolors=[colormap])
+     def test_kde_colors_and_styles_subplots_custom_color(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         custom_colors = "rgcby"
+         axes = df.plot(kind="kde", color=custom_colors, subplots=True)
+@@ -489,7 +490,7 @@ class TestDataFrameColor:
+     @pytest.mark.parametrize("colormap", ["jet", cm.jet])
+     def test_kde_colors_and_styles_subplots_cmap(self, colormap):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+         axes = df.plot(kind="kde", colormap=colormap, subplots=True)
+@@ -497,7 +498,7 @@ class TestDataFrameColor:
+             _check_colors(ax.get_lines(), linecolors=[c])
+     def test_kde_colors_and_styles_subplots_single_col(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         # make color a list if plotting one column frame
+         # handles cases like df.plot(color='DodgerBlue')
+@@ -505,7 +506,7 @@ class TestDataFrameColor:
+         _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+     def test_kde_colors_and_styles_subplots_single_char(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         # list of styles
+         # single character style
+@@ -514,7 +515,7 @@ class TestDataFrameColor:
+             _check_colors(ax.get_lines(), linecolors=["r"])
+     def test_kde_colors_and_styles_subplots_list(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+         # list of styles
+         styles = list("rgcby")
+--- a/pandas/tests/plotting/frame/test_frame_groupby.py
++++ b/pandas/tests/plotting/frame/test_frame_groupby.py
+@@ -2,10 +2,11 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ from pandas.tests.plotting.common import _check_visible
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+ class TestDataFramePlotsGroupby:
+--- a/pandas/tests/plotting/frame/test_frame_legend.py
++++ b/pandas/tests/plotting/frame/test_frame_legend.py
+@@ -14,7 +14,7 @@ from pandas.tests.plotting.common import
+ )
+ from pandas.util.version import Version
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+ class TestFrameLegend:
+@@ -61,7 +61,7 @@ class TestFrameLegend:
+     @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"])
+     def test_df_legend_labels(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
+         df2 = DataFrame(
+             np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
+@@ -87,7 +87,7 @@ class TestFrameLegend:
+         _check_legend_labels(ax, labels=expected)
+     def test_df_legend_labels_secondary_y(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
+         df2 = DataFrame(
+             np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
+@@ -105,7 +105,7 @@ class TestFrameLegend:
+     def test_df_legend_labels_time_series(self):
+         # Time Series
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ind = date_range("1/1/2014", periods=3)
+         df = DataFrame(
+             np.random.default_rng(2).standard_normal((3, 3)),
+@@ -131,7 +131,7 @@ class TestFrameLegend:
+     def test_df_legend_labels_time_series_scatter(self):
+         # Time Series
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ind = date_range("1/1/2014", periods=3)
+         df = DataFrame(
+             np.random.default_rng(2).standard_normal((3, 3)),
+@@ -157,7 +157,7 @@ class TestFrameLegend:
+         _check_legend_labels(ax, labels=["data1", "data3"])
+     def test_df_legend_labels_time_series_no_mutate(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ind = date_range("1/1/2014", periods=3)
+         df = DataFrame(
+             np.random.default_rng(2).standard_normal((3, 3)),
+--- a/pandas/tests/plotting/frame/test_frame_subplots.py
++++ b/pandas/tests/plotting/frame/test_frame_subplots.py
+@@ -6,6 +6,7 @@ import numpy as np
+ from numpy.testing import assert_array_almost_equal_nulp
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_linux
+ from pandas.compat.numpy import np_version_gte1p24
+@@ -27,8 +28,8 @@ from pandas.tests.plotting.common import
+ from pandas.io.formats.printing import pprint_thing
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+ class TestDataFramePlotsSubplots:
+--- a/pandas/tests/plotting/frame/test_hist_box_by.py
++++ b/pandas/tests/plotting/frame/test_hist_box_by.py
+@@ -3,6 +3,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ import pandas._testing as tm
+ from pandas.tests.plotting.common import (
+@@ -12,7 +13,7 @@ from pandas.tests.plotting.common import
+     get_y_axis,
+ )
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+ @pytest.fixture
+--- a/pandas/tests/plotting/test_boxplot_method.py
++++ b/pandas/tests/plotting/test_boxplot_method.py
+@@ -6,6 +6,7 @@ import string
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     MultiIndex,
+@@ -25,8 +26,8 @@ from pandas.tests.plotting.common import
+ from pandas.io.formats.printing import pprint_thing
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+ def _check_ax_limits(col, ax):
+--- a/pandas/tests/plotting/test_common.py
++++ b/pandas/tests/plotting/test_common.py
+@@ -1,5 +1,6 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ from pandas.tests.plotting.common import (
+     _check_plot_works,
+@@ -7,7 +8,7 @@ from pandas.tests.plotting.common import
+     _gen_two_subplots,
+ )
+-plt = pytest.importorskip("matplotlib.pyplot")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+ class TestCommon:
+--- a/pandas/tests/plotting/test_converter.py
++++ b/pandas/tests/plotting/test_converter.py
+@@ -8,6 +8,7 @@ import sys
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas._config.config as cf
+ from pandas._libs.tslibs import to_offset
+@@ -41,8 +42,8 @@ except ImportError:
+     # causing an improper skip
+     pass
+-pytest.importorskip("matplotlib.pyplot")
+-dates = pytest.importorskip("matplotlib.dates")
++td.versioned_importorskip("matplotlib.pyplot")
++dates = td.versioned_importorskip("matplotlib.dates")
+ @pytest.mark.single_cpu
+@@ -79,7 +80,7 @@ class TestRegistration:
+         assert subprocess.check_call(call) == 0
+     def test_registering_no_warning(self):
+-        plt = pytest.importorskip("matplotlib.pyplot")
++        plt = td.versioned_importorskip("matplotlib.pyplot")
+         s = Series(range(12), index=date_range("2017", periods=12))
+         _, ax = plt.subplots()
+@@ -89,7 +90,7 @@ class TestRegistration:
+         plt.close()
+     def test_pandas_plots_register(self):
+-        plt = pytest.importorskip("matplotlib.pyplot")
++        plt = td.versioned_importorskip("matplotlib.pyplot")
+         s = Series(range(12), index=date_range("2017", periods=12))
+         # Set to the "warn" state, in case this isn't the first test run
+         with tm.assert_produces_warning(None) as w:
+@@ -101,7 +102,7 @@ class TestRegistration:
+             plt.close()
+     def test_matplotlib_formatters(self):
+-        units = pytest.importorskip("matplotlib.units")
++        units = td.versioned_importorskip("matplotlib.units")
+         # Can't make any assertion about the start state.
+         # We we check that toggling converters off removes it, and toggling it
+@@ -113,9 +114,9 @@ class TestRegistration:
+             assert Timestamp in units.registry
+     def test_option_no_warning(self):
+-        pytest.importorskip("matplotlib.pyplot")
++        td.versioned_importorskip("matplotlib.pyplot")
+         ctx = cf.option_context("plotting.matplotlib.register_converters", False)
+-        plt = pytest.importorskip("matplotlib.pyplot")
++        plt = td.versioned_importorskip("matplotlib.pyplot")
+         s = Series(range(12), index=date_range("2017", periods=12))
+         _, ax = plt.subplots()
+@@ -130,8 +131,8 @@ class TestRegistration:
+         plt.close()
+     def test_registry_resets(self):
+-        units = pytest.importorskip("matplotlib.units")
+-        dates = pytest.importorskip("matplotlib.dates")
++        units = td.versioned_importorskip("matplotlib.units")
++        dates = td.versioned_importorskip("matplotlib.dates")
+         # make a copy, to reset to
+         original = dict(units.registry)
+--- a/pandas/tests/plotting/test_datetimelike.py
++++ b/pandas/tests/plotting/test_datetimelike.py
+@@ -10,6 +10,7 @@ import pickle
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs.tslibs import (
+     BaseOffset,
+     to_offset,
+@@ -41,7 +42,7 @@ from pandas.tests.plotting.common import
+ from pandas.tseries.offsets import WeekOfMonth
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+ class TestTSPlot:
+@@ -737,7 +738,7 @@ class TestTSPlot:
+         assert ax.get_yaxis().get_visible()
+     def test_secondary_kde(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series(np.random.default_rng(2).standard_normal(10))
+         fig, ax = mpl.pyplot.subplots()
+         ax = ser.plot(secondary_y=True, kind="density", ax=ax)
+--- a/pandas/tests/plotting/test_groupby.py
++++ b/pandas/tests/plotting/test_groupby.py
+@@ -4,6 +4,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Index,
+@@ -14,7 +15,7 @@ from pandas.tests.plotting.common import
+     _check_legend_labels,
+ )
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+ class TestDataFrameGroupByPlots:
+--- a/pandas/tests/plotting/test_hist_method.py
++++ b/pandas/tests/plotting/test_hist_method.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Index,
+@@ -25,7 +26,7 @@ from pandas.tests.plotting.common import
+     get_y_axis,
+ )
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+ @pytest.fixture
+@@ -206,7 +207,7 @@ class TestSeriesPlots:
+     @pytest.mark.xfail(reason="Api changed in 3.6.0")
+     def test_hist_kde(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _, ax = mpl.pyplot.subplots()
+         ax = ts.plot.hist(logy=True, ax=ax)
+         _check_ax_scales(ax, yaxis="log")
+@@ -217,16 +218,16 @@ class TestSeriesPlots:
+         _check_text_labels(ylabels, [""] * len(ylabels))
+     def test_hist_kde_plot_works(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _check_plot_works(ts.plot.kde)
+     def test_hist_kde_density_works(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _check_plot_works(ts.plot.density)
+     @pytest.mark.xfail(reason="Api changed in 3.6.0")
+     def test_hist_kde_logy(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _, ax = mpl.pyplot.subplots()
+         ax = ts.plot.kde(logy=True, ax=ax)
+         _check_ax_scales(ax, yaxis="log")
+@@ -236,7 +237,7 @@ class TestSeriesPlots:
+         _check_text_labels(ylabels, [""] * len(ylabels))
+     def test_hist_kde_color_bins(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _, ax = mpl.pyplot.subplots()
+         ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
+         _check_ax_scales(ax, yaxis="log")
+@@ -244,7 +245,7 @@ class TestSeriesPlots:
+         _check_colors(ax.patches, facecolors=["b"] * 10)
+     def test_hist_kde_color(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _, ax = mpl.pyplot.subplots()
+         ax = ts.plot.kde(logy=True, color="r", ax=ax)
+         _check_ax_scales(ax, yaxis="log")
+@@ -631,7 +632,7 @@ class TestDataFramePlots:
+     def test_hist_with_nans_and_weights(self):
+         # GH 48884
+-        mpl_patches = pytest.importorskip("matplotlib.patches")
++        mpl_patches = td.versioned_importorskip("matplotlib.patches")
+         df = DataFrame(
+             [[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]],
+             columns=list("abc"),
+--- a/pandas/tests/plotting/test_misc.py
++++ b/pandas/tests/plotting/test_misc.py
+@@ -26,9 +26,9 @@ from pandas.tests.plotting.common import
+     _check_ticks_props,
+ )
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
+-cm = pytest.importorskip("matplotlib.cm")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
++cm = td.versioned_importorskip("matplotlib.cm")
+ @pytest.fixture
+@@ -148,7 +148,7 @@ class TestSeriesPlots:
+ class TestDataFramePlots:
+     @pytest.mark.parametrize("pass_axis", [False, True])
+     def test_scatter_matrix_axis(self, pass_axis):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         scatter_matrix = plotting.scatter_matrix
+         ax = None
+@@ -173,7 +173,7 @@ class TestDataFramePlots:
+     @pytest.mark.parametrize("pass_axis", [False, True])
+     def test_scatter_matrix_axis_smaller(self, pass_axis):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         scatter_matrix = plotting.scatter_matrix
+         ax = None
+--- a/pandas/tests/plotting/test_series.py
++++ b/pandas/tests/plotting/test_series.py
+@@ -32,8 +32,8 @@ from pandas.tests.plotting.common import
+     get_y_axis,
+ )
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+ @pytest.fixture
+@@ -569,16 +569,16 @@ class TestSeriesPlots:
+         ],
+     )
+     def test_kde_kwargs(self, ts, bw_method, ind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind)
+     def test_density_kwargs(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         sample_points = np.linspace(-100, 100, 20)
+         _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
+     def test_kde_kwargs_check_axes(self, ts):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _, ax = mpl.pyplot.subplots()
+         sample_points = np.linspace(-100, 100, 20)
+         ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
+@@ -586,7 +586,7 @@ class TestSeriesPlots:
+         _check_text_labels(ax.yaxis.get_label(), "Density")
+     def test_kde_missing_vals(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series(np.random.default_rng(2).uniform(size=50))
+         s[0] = np.nan
+         axes = _check_plot_works(s.plot.kde)
+@@ -609,7 +609,7 @@ class TestSeriesPlots:
+         plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+     )
+     def test_kind_kwarg(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series(range(3))
+         _, ax = mpl.pyplot.subplots()
+         s.plot(kind=kind, ax=ax)
+@@ -620,7 +620,7 @@ class TestSeriesPlots:
+         plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+     )
+     def test_kind_attr(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series(range(3))
+         _, ax = mpl.pyplot.subplots()
+         getattr(s.plot, kind)()
+@@ -636,7 +636,7 @@ class TestSeriesPlots:
+     @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+     def test_valid_object_plot(self, kind):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series(range(10), dtype=object)
+         _check_plot_works(s.plot, kind=kind)
+@@ -750,7 +750,7 @@ class TestSeriesPlots:
+     @pytest.mark.slow
+     def test_series_grid_settings(self):
+         # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         _check_grid_settings(
+             Series([1, 2, 3]),
+             plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
+--- a/pandas/tests/plotting/test_style.py
++++ b/pandas/tests/plotting/test_style.py
+@@ -1,8 +1,9 @@
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import Series
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+ from pandas.plotting._matplotlib.style import get_standard_colors
+--- a/pandas/tests/reductions/test_reductions.py
++++ b/pandas/tests/reductions/test_reductions.py
+@@ -7,6 +7,7 @@ from decimal import Decimal
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     Categorical,
+@@ -1091,7 +1092,7 @@ class TestSeriesReductions:
+     def test_any_all_pyarrow_string(self):
+         # GH#54591
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = Series(["", "a"], dtype="string[pyarrow_numpy]")
+         assert ser.any()
+         assert not ser.all()
+--- a/pandas/tests/reductions/test_stat_reductions.py
++++ b/pandas/tests/reductions/test_stat_reductions.py
+@@ -6,6 +6,7 @@ import inspect
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -231,7 +232,7 @@ class TestSeriesStatReductions:
+         assert pd.isna(result)
+     def test_skew(self):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         string_series = Series(range(20), dtype=np.float64, name="series")
+@@ -253,7 +254,7 @@ class TestSeriesStatReductions:
+                 assert (df.skew() == 0).all()
+     def test_kurt(self):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         string_series = Series(range(20), dtype=np.float64, name="series")
+--- a/pandas/tests/resample/test_datetime_index.py
++++ b/pandas/tests/resample/test_datetime_index.py
+@@ -1110,7 +1110,7 @@ def test_resample_dtype_preservation(uni
+ def test_resample_dtype_coercion(unit):
+-    pytest.importorskip("scipy.interpolate")
++    td.versioned_importorskip("scipy.interpolate")
+     # GH 16361
+     df = {"a": [1, 3, 1, 4]}
+--- a/pandas/tests/reshape/merge/test_merge.py
++++ b/pandas/tests/reshape/merge/test_merge.py
+@@ -8,6 +8,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.common import (
+     is_object_dtype,
+     is_string_dtype,
+@@ -2817,7 +2818,7 @@ def test_merge_ea_and_non_ea(any_numeric
+ @pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"])
+ def test_merge_arrow_and_numpy_dtypes(dtype):
+     # GH#52406
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame({"a": [1, 2]}, dtype=dtype)
+     df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]")
+     result = df.merge(df2)
+@@ -2967,7 +2968,7 @@ def test_merge_ea_int_and_float_numpy():
+ def test_merge_arrow_string_index(any_string_dtype):
+     # GH#54894
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype)
+     right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype))
+     result = left.merge(right, left_on="a", right_index=True, how="left")
+--- a/pandas/tests/reshape/test_melt.py
++++ b/pandas/tests/reshape/test_melt.py
+@@ -3,6 +3,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -1224,7 +1225,7 @@ class TestWideToLong:
+ def test_wide_to_long_pyarrow_string_columns():
+     # GH 57066
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     df = DataFrame(
+         {
+             "ID": {0: 1},
+--- a/pandas/tests/series/accessors/test_list_accessor.py
++++ b/pandas/tests/series/accessors/test_list_accessor.py
+@@ -2,13 +2,14 @@ import re
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     ArrowDtype,
+     Series,
+ )
+ import pandas._testing as tm
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+ from pandas.compat import pa_version_under11p0
+--- a/pandas/tests/series/accessors/test_struct_accessor.py
++++ b/pandas/tests/series/accessors/test_struct_accessor.py
+@@ -2,6 +2,7 @@ import re
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import (
+     pa_version_under11p0,
+     pa_version_under13p0,
+@@ -15,8 +16,8 @@ from pandas import (
+ )
+ import pandas._testing as tm
+-pa = pytest.importorskip("pyarrow")
+-pc = pytest.importorskip("pyarrow.compute")
++pa = td.versioned_importorskip("pyarrow")
++pc = td.versioned_importorskip("pyarrow.compute")
+ def test_struct_accessor_dtypes():
+--- a/pandas/tests/series/methods/test_convert_dtypes.py
++++ b/pandas/tests/series/methods/test_convert_dtypes.py
+@@ -3,6 +3,7 @@ from itertools import product
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs import lib
+ import pandas as pd
+@@ -291,7 +292,7 @@ class TestSeriesConvertDtypes:
+     def test_convert_dtypes_pyarrow_to_np_nullable(self):
+         # GH 53648
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = pd.Series(range(2), dtype="int32[pyarrow]")
+         result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+         expected = pd.Series(range(2), dtype="Int32")
+@@ -299,7 +300,7 @@ class TestSeriesConvertDtypes:
+     def test_convert_dtypes_pyarrow_null(self):
+         # GH#55346
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         ser = pd.Series([None, None])
+         result = ser.convert_dtypes(dtype_backend="pyarrow")
+         expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null()))
+--- a/pandas/tests/series/methods/test_cov_corr.py
++++ b/pandas/tests/series/methods/test_cov_corr.py
+@@ -3,6 +3,7 @@ import math
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     Series,
+@@ -58,7 +59,7 @@ class TestSeriesCov:
+ class TestSeriesCorr:
+     @pytest.mark.parametrize("dtype", ["float64", "Float64"])
+     def test_corr(self, datetime_series, dtype):
+-        stats = pytest.importorskip("scipy.stats")
++        stats = td.versioned_importorskip("scipy.stats")
+         datetime_series = datetime_series.astype(dtype)
+@@ -93,7 +94,7 @@ class TestSeriesCorr:
+         tm.assert_almost_equal(result, expected)
+     def test_corr_rank(self):
+-        stats = pytest.importorskip("scipy.stats")
++        stats = td.versioned_importorskip("scipy.stats")
+         # kendall and spearman
+         A = Series(
+--- a/pandas/tests/series/methods/test_drop_duplicates.py
++++ b/pandas/tests/series/methods/test_drop_duplicates.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     Categorical,
+@@ -252,7 +253,7 @@ class TestSeriesDropDuplicates:
+         tm.assert_series_equal(result, expected)
+     def test_duplicated_arrow_dtype(self):
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = Series([True, False, None, False], dtype="bool[pyarrow]")
+         result = ser.drop_duplicates()
+         expected = Series([True, False, None], dtype="bool[pyarrow]")
+@@ -260,7 +261,7 @@ class TestSeriesDropDuplicates:
+     def test_drop_duplicates_arrow_strings(self):
+         # GH#54904
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         ser = Series(["a", "a"], dtype=pd.ArrowDtype(pa.string()))
+         result = ser.drop_duplicates()
+         expecetd = Series(["a"], dtype=pd.ArrowDtype(pa.string()))
+--- a/pandas/tests/series/methods/test_explode.py
++++ b/pandas/tests/series/methods/test_explode.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+@@ -146,7 +147,7 @@ def test_explode_scalars_can_ignore_inde
+ @pytest.mark.parametrize("ignore_index", [True, False])
+ def test_explode_pyarrow_list_type(ignore_index):
+     # GH 53602
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     data = [
+         [None, None],
+@@ -167,7 +168,7 @@ def test_explode_pyarrow_list_type(ignor
+ @pytest.mark.parametrize("ignore_index", [True, False])
+ def test_explode_pyarrow_non_list_type(ignore_index):
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     data = [1, 2, 3]
+     ser = pd.Series(data, dtype=pd.ArrowDtype(pa.int64()))
+     result = ser.explode(ignore_index=ignore_index)
+--- a/pandas/tests/series/methods/test_interpolate.py
++++ b/pandas/tests/series/methods/test_interpolate.py
+@@ -118,7 +118,7 @@ class TestSeriesInterpolateData:
+             non_ts.interpolate(method="time")
+     def test_interpolate_cubicspline(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series([10, 11, 12, 13])
+         expected = Series(
+@@ -133,7 +133,7 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(result, expected)
+     def test_interpolate_pchip(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
+         # interpolate at new_index
+@@ -145,7 +145,7 @@ class TestSeriesInterpolateData:
+         interp_s.loc[49:51]
+     def test_interpolate_akima(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series([10, 11, 12, 13])
+         # interpolate at new_index where `der` is zero
+@@ -171,7 +171,7 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(interp_s.loc[1:3], expected)
+     def test_interpolate_piecewise_polynomial(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series([10, 11, 12, 13])
+         expected = Series(
+@@ -186,7 +186,7 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(interp_s.loc[1:3], expected)
+     def test_interpolate_from_derivatives(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series([10, 11, 12, 13])
+         expected = Series(
+@@ -276,14 +276,14 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(result, expected)
+     def test_interp_quad(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
+         result = sq.interpolate(method="quadratic")
+         expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
+         tm.assert_series_equal(result, expected)
+     def test_interp_scipy_basic(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, 3, np.nan, 12, np.nan, 25])
+         # slinear
+         expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
+@@ -618,7 +618,7 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(result, expected)
+     def test_interp_all_good(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, 2, 3])
+         result = s.interpolate(method="polynomial", order=1)
+         tm.assert_series_equal(result, s)
+@@ -645,7 +645,7 @@ class TestSeriesInterpolateData:
+                 s.interpolate(method="polynomial", order=1)
+     def test_interp_nonmono_raise(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, np.nan, 3], index=[0, 2, 1])
+         msg = "krogh interpolation requires that the index be monotonic"
+         with pytest.raises(ValueError, match=msg):
+@@ -653,7 +653,7 @@ class TestSeriesInterpolateData:
+     @pytest.mark.parametrize("method", ["nearest", "pad"])
+     def test_interp_datetime64(self, method, tz_naive_fixture):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         df = Series(
+             [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
+         )
+@@ -699,7 +699,7 @@ class TestSeriesInterpolateData:
+     @pytest.mark.parametrize("method", ["polynomial", "spline"])
+     def test_no_order(self, method):
+         # see GH-10633, GH-24014
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([0, 1, np.nan, 3])
+         msg = "You must specify the order of the spline or polynomial"
+         with pytest.raises(ValueError, match=msg):
+@@ -707,21 +707,21 @@ class TestSeriesInterpolateData:
+     @pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
+     def test_interpolate_spline_invalid_order(self, order):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([0, 1, np.nan, 3])
+         msg = "order needs to be specified and greater than 0"
+         with pytest.raises(ValueError, match=msg):
+             s.interpolate(method="spline", order=order)
+     def test_spline(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
+         result = s.interpolate(method="spline", order=1)
+         expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
+         tm.assert_series_equal(result, expected)
+     def test_spline_extrapolate(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
+         result3 = s.interpolate(method="spline", order=1, ext=3)
+         expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
+@@ -732,7 +732,7 @@ class TestSeriesInterpolateData:
+         tm.assert_series_equal(result1, expected1)
+     def test_spline_smooth(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
+         assert (
+             s.interpolate(method="spline", order=3, s=0)[5]
+@@ -741,7 +741,7 @@ class TestSeriesInterpolateData:
+     def test_spline_interpolation(self):
+         # Explicit cast to float to avoid implicit cast when setting np.nan
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         s = Series(np.arange(10) ** 2, dtype="float")
+         s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
+         result1 = s.interpolate(method="spline", order=1)
+@@ -801,7 +801,7 @@ class TestSeriesInterpolateData:
+         method, kwargs = interp_methods_ind
+         if method == "pchip":
+-            pytest.importorskip("scipy")
++            td.versioned_importorskip("scipy")
+         if method == "linear":
+             result = df[0].interpolate(**kwargs)
+@@ -824,7 +824,7 @@ class TestSeriesInterpolateData:
+         are tested here.
+         """
+         # gh 21662
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ind = pd.timedelta_range(start=1, periods=4)
+         df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
+@@ -861,7 +861,7 @@ class TestSeriesInterpolateData:
+     def test_interpolate_fill_value(self):
+         # GH#54920
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])
+         result = ser.interpolate(method="nearest", fill_value=0)
+         expected = Series([np.nan, 0, 1, 1, 3, 0])
+--- a/pandas/tests/series/methods/test_rank.py
++++ b/pandas/tests/series/methods/test_rank.py
+@@ -56,7 +56,7 @@ def dtype(request):
+ class TestSeriesRank:
+     def test_rank(self, datetime_series):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         datetime_series[::2] = np.nan
+         datetime_series[:10:3] = 4.0
+@@ -269,7 +269,7 @@ class TestSeriesRank:
+     def test_rank_tie_methods_on_infs_nans(
+         self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf
+     ):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         if dtype == "float64[pyarrow]":
+             if method == "average":
+                 exp_dtype = "float64[pyarrow]"
+@@ -318,7 +318,7 @@ class TestSeriesRank:
+         ],
+     )
+     def test_rank_methods_series(self, method, op, value):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         xs = np.random.default_rng(2).standard_normal(9)
+         xs = np.concatenate([xs[i:] for i in range(0, 9, 2)])  # add duplicates
+--- a/pandas/tests/series/methods/test_reset_index.py
++++ b/pandas/tests/series/methods/test_reset_index.py
+@@ -3,6 +3,7 @@ from datetime import datetime
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -170,7 +171,7 @@ class TestResetIndex:
+     def test_reset_index_drop_infer_string(self):
+         # GH#56160
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = Series(["a", "b", "c"], dtype=object)
+         with option_context("future.infer_string", True):
+             result = ser.reset_index(drop=True)
+--- a/pandas/tests/series/test_api.py
++++ b/pandas/tests/series/test_api.py
+@@ -4,6 +4,7 @@ import pydoc
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+@@ -169,7 +170,7 @@ class TestSeriesMisc:
+     def test_inspect_getmembers(self):
+         # GH38782
+-        pytest.importorskip("jinja2")
++        td.versioned_importorskip("jinja2")
+         ser = Series(dtype=object)
+         msg = "Series._data is deprecated"
+         with tm.assert_produces_warning(
+--- a/pandas/tests/series/test_constructors.py
++++ b/pandas/tests/series/test_constructors.py
+@@ -2094,7 +2094,7 @@ class TestSeriesConstructors:
+     def test_series_string_inference(self):
+         # GH#54430
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype = "string[pyarrow_numpy]"
+         expected = Series(["a", "b"], dtype=dtype)
+         with pd.option_context("future.infer_string", True):
+@@ -2109,7 +2109,7 @@ class TestSeriesConstructors:
+     @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA])
+     def test_series_string_with_na_inference(self, na_value):
+         # GH#54430
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype = "string[pyarrow_numpy]"
+         expected = Series(["a", na_value], dtype=dtype)
+         with pd.option_context("future.infer_string", True):
+@@ -2118,7 +2118,7 @@ class TestSeriesConstructors:
+     def test_series_string_inference_scalar(self):
+         # GH#54430
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = Series("a", index=[1], dtype="string[pyarrow_numpy]")
+         with pd.option_context("future.infer_string", True):
+             ser = Series("a", index=[1])
+@@ -2126,7 +2126,7 @@ class TestSeriesConstructors:
+     def test_series_string_inference_array_string_dtype(self):
+         # GH#54496
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+         with pd.option_context("future.infer_string", True):
+             ser = Series(np.array(["a", "b"]))
+@@ -2134,7 +2134,7 @@ class TestSeriesConstructors:
+     def test_series_string_inference_storage_definition(self):
+         # GH#54793
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+         with pd.option_context("future.infer_string", True):
+             result = Series(["a", "b"], dtype="string")
+@@ -2150,7 +2150,7 @@ class TestSeriesConstructors:
+     def test_series_string_inference_na_first(self):
+         # GH#55655
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         expected = Series([pd.NA, "b"], dtype="string[pyarrow_numpy]")
+         with pd.option_context("future.infer_string", True):
+             result = Series([pd.NA, "b"])
+--- a/pandas/tests/series/test_formats.py
++++ b/pandas/tests/series/test_formats.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ import pandas as pd
+@@ -227,7 +228,7 @@ class TestSeriesRepr:
+         repr(ts2).splitlines()[-1]
+     def test_latex_repr(self):
+-        pytest.importorskip("jinja2")  # uses Styler implementation
++        td.versioned_importorskip("jinja2")  # uses Styler implementation
+         result = r"""\begin{tabular}{ll}
+ \toprule
+  & 0 \\
+--- a/pandas/tests/series/test_logical_ops.py
++++ b/pandas/tests/series/test_logical_ops.py
+@@ -4,6 +4,7 @@ import operator
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Index,
+@@ -533,7 +534,7 @@ class TestSeriesLogicalOps:
+     def test_pyarrow_numpy_string_invalid(self):
+         # GH#56008
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         ser = Series([False, True])
+         ser2 = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+         result = ser == ser2
+--- a/pandas/tests/series/test_reductions.py
++++ b/pandas/tests/series/test_reductions.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import Series
+ import pandas._testing as tm
+@@ -53,7 +54,7 @@ def test_mode_nullable_dtype(any_numeric
+ def test_mode_infer_string():
+     # GH#56183
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     ser = Series(["a", "b"], dtype=object)
+     with pd.option_context("future.infer_string", True):
+         result = ser.mode()
+--- a/pandas/tests/strings/test_extract.py
++++ b/pandas/tests/strings/test_extract.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.dtypes import ArrowDtype
+ from pandas import (
+@@ -718,7 +719,7 @@ def test_extractall_same_as_extract_subj
+ def test_extractall_preserves_dtype():
+     # Ensure that when extractall is called on a series with specific dtypes set, that
+     # the dtype is preserved in the resulting DataFrame's column.
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)")
+     assert result.dtypes[0] == "string[pyarrow]"
+--- a/pandas/tests/test_algos.py
++++ b/pandas/tests/test_algos.py
+@@ -4,6 +4,7 @@ import struct
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas._libs import (
+     algos as libalgos,
+     hashtable as ht,
+@@ -1789,7 +1790,7 @@ class TestRank:
+         ],
+     )
+     def test_scipy_compat(self, arr):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         arr = np.array(arr)
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -44,8 +44,8 @@ def test_dask(df):
+     olduse = pd.get_option("compute.use_numexpr")
+     try:
+-        pytest.importorskip("toolz")
+-        dd = pytest.importorskip("dask.dataframe")
++        td.versioned_importorskip("toolz")
++        dd = td.versioned_importorskip("dask.dataframe")
+         ddf = dd.from_pandas(df, npartitions=3)
+         assert ddf.A is not None
+@@ -61,8 +61,8 @@ def test_dask_ufunc():
+     olduse = pd.get_option("compute.use_numexpr")
+     try:
+-        da = pytest.importorskip("dask.array")
+-        dd = pytest.importorskip("dask.dataframe")
++        da = td.versioned_importorskip("dask.array")
++        dd = td.versioned_importorskip("dask.dataframe")
+         s = Series([1.5, 2.3, 3.7, 4.0])
+         ds = dd.from_pandas(s, npartitions=2)
+@@ -78,7 +78,7 @@ def test_dask_ufunc():
+ def test_construct_dask_float_array_int_dtype_match_ndarray():
+     # GH#40110 make sure we treat a float-dtype dask array with the same
+     #  rules we would for an ndarray
+-    dd = pytest.importorskip("dask.dataframe")
++    dd = td.versioned_importorskip("dask.dataframe")
+     arr = np.array([1, 2.5, 3])
+     darr = dd.from_array(arr)
+@@ -102,15 +102,15 @@ def test_construct_dask_float_array_int_
+ def test_xarray(df):
+-    pytest.importorskip("xarray")
++    td.versioned_importorskip("xarray")
+     assert df.to_xarray() is not None
+ def test_xarray_cftimeindex_nearest():
+     # https://github.com/pydata/xarray/issues/3751
+-    cftime = pytest.importorskip("cftime")
+-    xarray = pytest.importorskip("xarray")
++    cftime = td.versioned_importorskip("cftime")
++    xarray = td.versioned_importorskip("xarray")
+     times = xarray.cftime_range("0001", periods=2)
+     key = cftime.DatetimeGregorian(2000, 1, 1)
+@@ -142,7 +142,7 @@ def test_oo_optimized_datetime_index_unp
+ def test_statsmodels():
+-    smf = pytest.importorskip("statsmodels.formula.api")
++    smf = td.versioned_importorskip("statsmodels.formula.api")
+     df = DataFrame(
+         {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)}
+@@ -151,7 +151,7 @@ def test_statsmodels():
+ def test_scikit_learn():
+-    pytest.importorskip("sklearn")
++    td.versioned_importorskip("sklearn")
+     from sklearn import (
+         datasets,
+         svm,
+@@ -164,7 +164,7 @@ def test_scikit_learn():
+ def test_seaborn():
+-    seaborn = pytest.importorskip("seaborn")
++    seaborn = td.versioned_importorskip("seaborn")
+     tips = DataFrame(
+         {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)}
+     )
+@@ -172,12 +172,12 @@ def test_seaborn():
+ def test_pandas_datareader():
+-    pytest.importorskip("pandas_datareader")
++    td.versioned_importorskip("pandas_datareader")
+ @pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
+ def test_pyarrow(df):
+-    pyarrow = pytest.importorskip("pyarrow")
++    pyarrow = td.versioned_importorskip("pyarrow")
+     table = pyarrow.Table.from_pandas(df)
+     result = table.to_pandas()
+     tm.assert_frame_equal(result, df)
+@@ -185,7 +185,7 @@ def test_pyarrow(df):
+ def test_yaml_dump(df):
+     # GH#42748
+-    yaml = pytest.importorskip("yaml")
++    yaml = td.versioned_importorskip("yaml")
+     dumped = yaml.dump(df)
+@@ -247,7 +247,7 @@ def test_frame_setitem_dask_array_into_n
+     olduse = pd.get_option("compute.use_numexpr")
+     try:
+-        da = pytest.importorskip("dask.array")
++        da = td.versioned_importorskip("dask.array")
+         dda = da.array([1, 2])
+         df = DataFrame({"a": ["a", "b"]})
+@@ -348,7 +348,7 @@ def test_dataframe_consortium() -> None:
+     Full testing is done at https://github.com/data-apis/dataframe-api-compat,
+     this is just to check that the entry point works as expected.
+     """
+-    pytest.importorskip("dataframe_api_compat")
++    td.versioned_importorskip("dataframe_api_compat")
+     df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+     df = df_pd.__dataframe_consortium_standard__()
+     result_1 = df.get_column_names()
+@@ -362,7 +362,7 @@ def test_dataframe_consortium() -> None:
+ def test_xarray_coerce_unit():
+     # GH44053
+-    xr = pytest.importorskip("xarray")
++    xr = td.versioned_importorskip("xarray")
+     arr = xr.DataArray([1, 2, 3])
+     result = pd.to_datetime(arr, unit="ns")
+--- a/pandas/tests/test_nanops.py
++++ b/pandas/tests/test_nanops.py
+@@ -500,7 +500,7 @@ class TestnanopsDataFrame:
+     @pytest.mark.parametrize("ddof", range(3))
+     def test_nansem(self, ddof, skipna):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         with np.errstate(invalid="ignore"):
+             self.check_funs(
+@@ -559,7 +559,7 @@ class TestnanopsDataFrame:
+         return result
+     def test_nanskew(self, skipna):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
+         with np.errstate(invalid="ignore"):
+@@ -573,7 +573,7 @@ class TestnanopsDataFrame:
+             )
+     def test_nankurt(self, skipna):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         func1 = partial(sp_stats.kurtosis, fisher=True)
+         func = partial(self._skew_kurt_wrap, func=func1)
+@@ -704,7 +704,7 @@ class TestnanopsDataFrame:
+         self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
+     def test_nancorr_kendall(self):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
+         targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+@@ -714,7 +714,7 @@ class TestnanopsDataFrame:
+         self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
+     def test_nancorr_spearman(self):
+-        sp_stats = pytest.importorskip("scipy.stats")
++        sp_stats = td.versioned_importorskip("scipy.stats")
+         targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
+         targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+@@ -724,7 +724,7 @@ class TestnanopsDataFrame:
+         self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
+     def test_invalid_method(self):
+-        pytest.importorskip("scipy")
++        td.versioned_importorskip("scipy")
+         targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
+         targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
+         msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
+--- a/pandas/tests/test_optional_dependency.py
++++ b/pandas/tests/test_optional_dependency.py
+@@ -3,6 +3,7 @@ import types
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import (
+     VERSIONS,
+     import_optional_dependency,
+@@ -23,7 +24,7 @@ def test_import_optional():
+ def test_xlrd_version_fallback():
+-    pytest.importorskip("xlrd")
++    td.versioned_importorskip("xlrd")
+     import_optional_dependency("xlrd")
+--- a/pandas/tests/tools/test_to_datetime.py
++++ b/pandas/tests/tools/test_to_datetime.py
+@@ -1002,7 +1002,7 @@ class TestToDatetime:
+     @pytest.mark.parametrize("utc", [True, False])
+     @pytest.mark.parametrize("tz", [None, "US/Central"])
+     def test_to_datetime_arrow(self, tz, utc, arg_class):
+-        pa = pytest.importorskip("pyarrow")
++        pa = td.versioned_importorskip("pyarrow")
+         dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
+         dti = arg_class(dti)
+@@ -1357,7 +1357,7 @@ class TestToDatetime:
+     def test_to_datetime_tz_psycopg2(self, request, cache):
+         # xref 8260
+-        psycopg2_tz = pytest.importorskip("psycopg2.tz")
++        psycopg2_tz = td.versioned_importorskip("psycopg2.tz")
+         # misc cases
+         tz1 = psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None)
+@@ -3742,7 +3742,7 @@ def test_ignoring_unknown_tz_deprecated(
+ def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
+     # GH 52425
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
+     result = to_datetime(ser)
+     expected = Series([1, 2], dtype="datetime64[ns]")
+--- a/pandas/tests/tools/test_to_numeric.py
++++ b/pandas/tests/tools/test_to_numeric.py
+@@ -867,7 +867,7 @@ def test_to_numeric_dtype_backend(val, d
+ def test_to_numeric_dtype_backend_na(val, dtype):
+     # GH#50505
+     if "pyarrow" in dtype:
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype_backend = "pyarrow"
+     else:
+         dtype_backend = "numpy_nullable"
+@@ -891,7 +891,7 @@ def test_to_numeric_dtype_backend_na(val
+ def test_to_numeric_dtype_backend_downcasting(val, dtype, downcast):
+     # GH#50505
+     if "pyarrow" in dtype:
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+         dtype_backend = "pyarrow"
+     else:
+         dtype_backend = "numpy_nullable"
+@@ -908,7 +908,7 @@ def test_to_numeric_dtype_backend_downca
+ def test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend):
+     # GH#50505
+     if dtype_backend == "pyarrow":
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     ser = Series([1, pd.NA], dtype="UInt64")
+     result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned")
+     expected = Series([1, pd.NA], dtype=smaller)
+@@ -931,7 +931,7 @@ def test_to_numeric_dtype_backend_downca
+ def test_to_numeric_dtype_backend_already_nullable(dtype):
+     # GH#50505
+     if "pyarrow" in dtype:
+-        pytest.importorskip("pyarrow")
++        td.versioned_importorskip("pyarrow")
+     ser = Series([1, pd.NA], dtype=dtype)
+     result = to_numeric(ser, dtype_backend="numpy_nullable")
+     expected = Series([1, pd.NA], dtype=dtype)
+@@ -971,7 +971,7 @@ def test_invalid_dtype_backend():
+ def test_coerce_pyarrow_backend():
+     # GH 52588
+-    pa = pytest.importorskip("pyarrow")
++    pa = td.versioned_importorskip("pyarrow")
+     ser = Series(list("12x"), dtype=ArrowDtype(pa.string()))
+     result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow")
+     expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64()))
+--- a/pandas/tests/tools/test_to_timedelta.py
++++ b/pandas/tests/tools/test_to_timedelta.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas.compat import IS64
+ from pandas.errors import OutOfBoundsTimedelta
+@@ -324,7 +325,7 @@ class TestTimedeltas:
+ def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
+     # GH 52425
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
+     result = to_timedelta(ser)
+     expected = Series([1, 2], dtype="timedelta64[ns]")
+@@ -334,7 +335,7 @@ def test_from_numeric_arrow_dtype(any_nu
+ @pytest.mark.parametrize("unit", ["ns", "ms"])
+ def test_from_timedelta_arrow_dtype(unit):
+     # GH 54298
+-    pytest.importorskip("pyarrow")
++    td.versioned_importorskip("pyarrow")
+     expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]")
+     result = to_timedelta(expected)
+     tm.assert_series_equal(result, expected)
+--- a/pandas/tests/window/test_online.py
++++ b/pandas/tests/window/test_online.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+ pytestmark = pytest.mark.single_cpu
+-pytest.importorskip("numba")
++td.versioned_importorskip("numba")
+ @pytest.mark.filterwarnings("ignore")
+--- a/pandas/tests/window/test_rolling_skew_kurt.py
++++ b/pandas/tests/window/test_rolling_skew_kurt.py
+@@ -3,6 +3,7 @@ from functools import partial
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -17,7 +18,7 @@ from pandas.tseries import offsets
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_series(series, sp_func, roll_func):
+-    sp_stats = pytest.importorskip("scipy.stats")
++    sp_stats = td.versioned_importorskip("scipy.stats")
+     compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+     result = getattr(series.rolling(50), roll_func)()
+@@ -27,7 +28,7 @@ def test_series(series, sp_func, roll_fu
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_frame(raw, frame, sp_func, roll_func):
+-    sp_stats = pytest.importorskip("scipy.stats")
++    sp_stats = td.versioned_importorskip("scipy.stats")
+     compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+     result = getattr(frame.rolling(50), roll_func)()
+@@ -41,7 +42,7 @@ def test_frame(raw, frame, sp_func, roll
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_time_rule_series(series, sp_func, roll_func):
+-    sp_stats = pytest.importorskip("scipy.stats")
++    sp_stats = td.versioned_importorskip("scipy.stats")
+     compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+     win = 25
+@@ -56,7 +57,7 @@ def test_time_rule_series(series, sp_fun
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_time_rule_frame(raw, frame, sp_func, roll_func):
+-    sp_stats = pytest.importorskip("scipy.stats")
++    sp_stats = td.versioned_importorskip("scipy.stats")
+     compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+     win = 25
+@@ -75,7 +76,7 @@ def test_time_rule_frame(raw, frame, sp_
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_nans(sp_func, roll_func):
+-    sp_stats = pytest.importorskip("scipy.stats")
++    sp_stats = td.versioned_importorskip("scipy.stats")
+     compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+     obj = Series(np.random.default_rng(2).standard_normal(50))
+--- a/pandas/tests/window/test_win_type.py
++++ b/pandas/tests/window/test_win_type.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
++import pandas.util._test_decorators as td
+ from pandas import (
+     DataFrame,
+     Series,
+@@ -35,7 +36,7 @@ def win_types_special(request):
+ def test_constructor(frame_or_series):
+     # GH 12669
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     c = frame_or_series(range(5)).rolling
+     # valid
+@@ -47,7 +48,7 @@ def test_constructor(frame_or_series):
+ @pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
+ def test_invalid_constructor(frame_or_series, w):
+     # not valid
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     c = frame_or_series(range(5)).rolling
+     with pytest.raises(ValueError, match="min_periods must be an integer"):
+         c(win_type="boxcar", window=2, min_periods=w)
+@@ -57,7 +58,7 @@ def test_invalid_constructor(frame_or_se
+ @pytest.mark.parametrize("wt", ["foobar", 1])
+ def test_invalid_constructor_wintype(frame_or_series, wt):
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     c = frame_or_series(range(5)).rolling
+     with pytest.raises(ValueError, match="Invalid win_type"):
+         c(win_type=wt, window=2)
+@@ -65,14 +66,14 @@ def test_invalid_constructor_wintype(fra
+ def test_constructor_with_win_type(frame_or_series, win_types):
+     # GH 12669
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     c = frame_or_series(range(5)).rolling
+     c(win_type=win_types, window=2)
+ @pytest.mark.parametrize("arg", ["median", "kurt", "skew"])
+ def test_agg_function_support(arg):
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     df = DataFrame({"A": np.arange(5)})
+     roll = df.rolling(2, win_type="triang")
+@@ -89,7 +90,7 @@ def test_agg_function_support(arg):
+ def test_invalid_scipy_arg():
+     # This error is raised by scipy
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     msg = r"boxcar\(\) got an unexpected"
+     with pytest.raises(TypeError, match=msg):
+         Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
+@@ -97,7 +98,7 @@ def test_invalid_scipy_arg():
+ def test_constructor_with_win_type_invalid(frame_or_series):
+     # GH 13383
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     c = frame_or_series(range(5)).rolling
+     msg = "window must be an integer 0 or greater"
+@@ -108,7 +109,7 @@ def test_constructor_with_win_type_inval
+ def test_window_with_args(step):
+     # make sure that we are aggregating window functions correctly with arg
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     r = Series(np.random.default_rng(2).standard_normal(100)).rolling(
+         window=10, min_periods=1, win_type="gaussian", step=step
+     )
+@@ -130,7 +131,7 @@ def test_window_with_args(step):
+ def test_win_type_with_method_invalid():
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     with pytest.raises(
+         NotImplementedError, match="'single' is the only supported method type."
+     ):
+@@ -140,7 +141,7 @@ def test_win_type_with_method_invalid():
+ @pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")])
+ def test_consistent_win_type_freq(arg):
+     # GH 15969
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     s = Series(range(1))
+     with pytest.raises(ValueError, match="Invalid win_type freq"):
+         s.rolling(arg, win_type="freq")
+@@ -153,7 +154,7 @@ def test_win_type_freq_return_none():
+ def test_win_type_not_implemented():
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     class CustomIndexer(BaseIndexer):
+         def get_window_bounds(self, num_values, min_periods, center, closed, step):
+@@ -167,7 +168,7 @@ def test_win_type_not_implemented():
+ def test_cmov_mean(step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+     result = Series(vals).rolling(5, center=True, step=step).mean()
+     expected_values = [
+@@ -188,7 +189,7 @@ def test_cmov_mean(step):
+ def test_cmov_window(step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+     result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean()
+     expected_values = [
+@@ -210,7 +211,7 @@ def test_cmov_window(step):
+ def test_cmov_window_corner(step):
+     # GH 8238
+     # all nan
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = Series([np.nan] * 10)
+     result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()
+     assert np.isnan(result).all()
+@@ -294,7 +295,7 @@ def test_cmov_window_corner(step):
+ )
+ def test_cmov_window_frame(f, xp, step):
+     # Gh 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     df = DataFrame(
+         np.array(
+             [
+@@ -321,7 +322,7 @@ def test_cmov_window_frame(f, xp, step):
+ @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5])
+ def test_cmov_window_na_min_periods(step, min_periods):
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = Series(np.random.default_rng(2).standard_normal(10))
+     vals[4] = np.nan
+     vals[8] = np.nan
+@@ -335,7 +336,7 @@ def test_cmov_window_na_min_periods(step
+ def test_cmov_window_regular(win_types, step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+     xps = {
+         "hamming": [
+@@ -443,7 +444,7 @@ def test_cmov_window_regular(win_types,
+ def test_cmov_window_regular_linear_range(win_types, step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = np.array(range(10), dtype=float)
+     xp = vals.copy()
+     xp[:2] = np.nan
+@@ -456,7 +457,7 @@ def test_cmov_window_regular_linear_rang
+ def test_cmov_window_regular_missing_data(win_types, step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     vals = np.array(
+         [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
+     )
+@@ -566,7 +567,7 @@ def test_cmov_window_regular_missing_dat
+ def test_cmov_window_special(win_types_special, step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     kwds = {
+         "kaiser": {"beta": 1.0},
+         "gaussian": {"std": 1.0},
+@@ -638,7 +639,7 @@ def test_cmov_window_special(win_types_s
+ def test_cmov_window_special_linear_range(win_types_special, step):
+     # GH 8238
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     kwds = {
+         "kaiser": {"beta": 1.0},
+         "gaussian": {"std": 1.0},
+@@ -663,7 +664,7 @@ def test_cmov_window_special_linear_rang
+ def test_weighted_var_big_window_no_segfault(win_types, center):
+     # GitHub Issue #46772
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     x = Series(0)
+     result = x.rolling(window=16, center=center, win_type=win_types).var()
+     expected = Series(np.nan)
+@@ -672,7 +673,7 @@ def test_weighted_var_big_window_no_segf
+ def test_rolling_center_axis_1():
+-    pytest.importorskip("scipy")
++    td.versioned_importorskip("scipy")
+     df = DataFrame(
+         {"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]}
+     )
+--- a/pandas/util/_test_decorators.py
++++ b/pandas/util/_test_decorators.py
+@@ -79,8 +79,8 @@ def skip_if_no(package: str, min_version
+     The mark can be used as either a decorator for a test class or to be
+     applied to parameters in pytest.mark.parametrize calls or parametrized
+-    fixtures. Use pytest.importorskip if an imported moduled is later needed
+-    or for test functions.
++    fixtures. Use td.versioned_importorskip if an imported module is later
++    needed or for test functions.
+     If the import and version check are unsuccessful, then the test function
+     (or test case when used in conjunction with parametrization) will be
+@@ -171,3 +171,22 @@ skip_copy_on_write_invalid_test = pytest
+     get_option("mode.copy_on_write") is True,
+     reason="Test not valid for Copy-on-Write mode",
+ )
++
++def versioned_importorskip(*args, **kwargs):
++    """
++    (warning - this is currently Debian-specific; the name may change if upstream requests this)
++
++    Return the requested module, or skip the test if it is
++    not available in a new enough version.
++
++    Intended as a replacement for pytest.importorskip that
++    defaults to requiring at least pandas' minimum version for that
++    optional dependency, rather than any version.
++
++    See import_optional_dependency for full parameter documentation.
++    """
++    try:
++        module = import_optional_dependency(*args, **kwargs)
++    except ImportError as exc:
++        pytest.skip(str(exc), allow_module_level=True)
++    return module
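+
+A minimal usage sketch (assuming the converted tests above and pandas'
+declared minimum versions in pandas.compat._optional.VERSIONS; the names
+are real, the snippet itself is illustrative):
+
+    import pandas.util._test_decorators as td
+
+    # Returns scipy.stats, or skips the whole test module when scipy is
+    # missing *or* older than pandas' declared minimum for it; a plain
+    # pytest.importorskip("scipy.stats") accepts any installed version.
+    sp_stats = td.versioned_importorskip("scipy.stats")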
diff --git a/patches/xarray_version_workaround.patch b/patches/xarray_version_workaround.patch
new file mode 100644 (file)
index 0000000..007c371
--- /dev/null
@@ -0,0 +1,16 @@
+Description: Work around xarray wrongly reporting its version
+ Debian's xarray 2023.08.0-1 reports the placeholder version "999", which
+ packaging-style ordering sorts below the stock minimum "2022.12.0", so
+ pandas would wrongly reject the installed package; raising the recorded
+ minimum to "999" makes the check pass.
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/compat/_optional.py
++++ b/pandas/compat/_optional.py
+@@ -49,7 +49,7 @@ VERSIONS = {
+     "sqlalchemy": "2.0.0",
+     "tables": "3.8.0",
+     "tabulate": "0.9.0",
+-    "xarray": "2022.12.0",
++    "xarray": "999",#Debian xarray 2023.08.0-1 says it's 999
+     "xlrd": "2.0.1",
+     "xlsxwriter": "3.0.5",
+     "zstandard": "0.19.0",
diff --git a/patches/xfail_tests_nonintel_io.patch b/patches/xfail_tests_nonintel_io.patch
new file mode 100644 (file)
index 0000000..09a7266
--- /dev/null
@@ -0,0 +1,269 @@
+Description: HDF5 and Stata I/O are broken on some architectures
+
+Fix some issues; warn on use and xfail the tests for the remainder
+
+Everything that has a run=False xfail in here should also be in
+the run-and-ignore set in debian/tests/numbatests
+
+On armhf, TestHDF5Store::test*encoding crashes only intermittently
+(1.1.3+dfsg-1 passed its build-time tests but failed autopkgtest)
+
+HDF5 and Stata I/O are known to fail on big-endian architectures
+Stata was previously seen to fail on qemu-ppc64el, but not on real ppc64el
+
+Author: Andreas Tille <tille@debian.org>, Graham Inggs <ginggs@debian.org>, Yaroslav Halchenko <debian@onerussian.com>, Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Bug-Debian: https://bugs.debian.org/877419
+Bug: partly https://github.com/pandas-dev/pandas/issues/54396
+Forwarded: no
+
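+A minimal sketch of the architecture guard the hunks below add to the test
+files (assuming platform.uname()[4] reports the kernel's machine name and
+sys.maxsize distinguishes 32-bit from 64-bit Python builds):
+
+    import platform
+    import sys
+
+    # True on 32-bit ARM userlands (armhf, and incidentally armel),
+    # including a 32-bit Python on an aarch64 kernel, where uname
+    # still reports aarch64.
+    is_crashing_arch = bool(
+        (platform.uname()[4].startswith("arm")
+         or platform.uname()[4].startswith("aarch"))
+        and sys.maxsize < 2**33
+    )
+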
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -495,6 +495,7 @@ filterwarnings = [
+   "error:::pandas",
+   "error::ResourceWarning",
+   "error::pytest.PytestUnraisableExceptionWarning",
++  "ignore:Non-x86 system detected:UserWarning:pandas",
+   # TODO(PY311-minimum): Specify EncodingWarning
+   # Ignore 3rd party EncodingWarning but raise on pandas'
+   "ignore:.*encoding.* argument not specified",
+--- a/pandas/io/pytables.py
++++ b/pandas/io/pytables.py
+@@ -24,6 +24,10 @@ from typing import (
+     overload,
+ )
+ import warnings
++import platform
++import sys
++from pandas.compat import is_platform_little_endian
++warn_hdf_platform = "Non-x86 system detected, HDF(5) format I/O may give wrong results (particularly on files created with older versions) or crash - https://bugs.debian.org/877419" if (((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and sys.maxsize<2**33) or not is_platform_little_endian()) else False
+ import numpy as np
+@@ -560,6 +564,8 @@ class HDFStore:
+         fletcher32: bool = False,
+         **kwargs,
+     ) -> None:
++        if warn_hdf_platform:
++            warnings.warn(warn_hdf_platform)
+         if "format" in kwargs:
+             raise ValueError("format is not a defined argument for HDFStore")
+@@ -781,7 +787,10 @@ class HDFStore:
+             self._handle.flush()
+             if fsync:
+                 with suppress(OSError):
+-                    os.fsync(self._handle.fileno())
++                    if is_platform_little_endian():
++                        os.fsync(self._handle.fileno())
++                    else:
++                        os.sync() # due to a pytables bad-cast bug, fileno is invalid on 64-bit big-endian#
+     def get(self, key: str):
+         """
+--- a/pandas/io/stata.py
++++ b/pandas/io/stata.py
+@@ -29,6 +29,8 @@ from typing import (
+     cast,
+ )
+ import warnings
++from pandas.compat import is_platform_little_endian
++warn_stata_platform = "Non-x86 system detected, Stata format I/O may give wrong results (particularly on strings) - https://bugs.debian.org/877419" if not is_platform_little_endian() else False
+ import numpy as np
+@@ -971,6 +973,8 @@ class StataParser:
+         # NOTE: the byte type seems to be reserved for categorical variables
+         # with a label, but the underlying variable is -127 to 100
+         # we're going to drop the label and cast to int
++        if warn_stata_platform:
++            warnings.warn(warn_stata_platform)
+         self.DTYPE_MAP = dict(
+             [(i, np.dtype(f"S{i}")) for i in range(1, 245)]
+             + [
+--- a/pandas/tests/io/pytables/test_file_handling.py
++++ b/pandas/tests/io/pytables/test_file_handling.py
+@@ -28,6 +28,10 @@ from pandas.tests.io.pytables.common imp
+     ensure_clean_store,
+     tables,
+ )
++import platform
++import re
++import sys
++is_crashing_arch = bool((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and sys.maxsize < 2**33)  # meant for armhf, though this form will also skip on armel - uname = kernel arch
+ from pandas.io import pytables
+ from pandas.io.pytables import Term
+@@ -297,6 +301,7 @@ def test_complibs(tmp_path, lvl, lib, re
+                 assert node.filters.complib == lib
++@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
+ @pytest.mark.skipif(
+     not is_platform_little_endian(), reason="reason platform is not little endian"
+ )
+@@ -329,6 +334,7 @@ def test_encoding(setup_path):
+     ],
+ )
+ @pytest.mark.parametrize("dtype", ["category", object])
++@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
+ def test_latin_encoding(tmp_path, setup_path, dtype, val):
+     enc = "latin-1"
+     nan_rep = ""
+--- a/pandas/tests/io/pytables/test_append.py
++++ b/pandas/tests/io/pytables/test_append.py
+@@ -22,6 +22,10 @@ from pandas.tests.io.pytables.common imp
+     _maybe_remove,
+     ensure_clean_store,
+ )
++import platform
++import re
++import sys
++is_crashing_arch=bool((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and sys.maxsize<2**33) # meant for armhf, though this form will also skip on armel - uname = kernel arch
+ pytestmark = pytest.mark.single_cpu
+@@ -282,6 +286,7 @@ def test_append_all_nans(setup_path):
+             tm.assert_frame_equal(store["df2"], df, check_index_type=True)
++@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
+ def test_append_frame_column_oriented(setup_path):
+     with ensure_clean_store(setup_path) as store:
+         # column oriented
+--- a/pandas/tests/io/pytables/test_store.py
++++ b/pandas/tests/io/pytables/test_store.py
+@@ -30,6 +30,10 @@ from pandas.io.pytables import (
+     HDFStore,
+     read_hdf,
+ )
++import platform
++import re
++import sys
++is_crashing_arch=bool((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and sys.maxsize<2**33) # meant for armhf, though this form will also skip on armel - uname = kernel arch
+ pytestmark = pytest.mark.single_cpu
+@@ -880,6 +884,7 @@ def test_start_stop_fixed(setup_path):
+         df.iloc[8:10, -2] = np.nan
++@pytest.mark.xfail(condition=is_crashing_arch,reason="https://bugs.debian.org/790925",strict=False,run=False)
+ def test_select_filter_corner(setup_path):
+     df = DataFrame(np.random.default_rng(2).standard_normal((50, 100)))
+     df.index = [f"{c:3d}" for c in df.index]
+--- a/pandas/tests/io/pytables/test_read.py
++++ b/pandas/tests/io/pytables/test_read.py
+@@ -6,7 +6,7 @@ import numpy as np
+ import pytest
+ from pandas._libs.tslibs import Timestamp
+-from pandas.compat import is_platform_windows
++from pandas.compat import is_platform_windows, is_platform_little_endian
+ import pandas as pd
+ from pandas import (
+@@ -172,6 +172,7 @@ def test_pytables_native2_read(datapath)
+     assert isinstance(d1, DataFrame)
++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)
+ def test_legacy_table_fixed_format_read_py2(datapath):
+     # GH 24510
+     # legacy table with fixed format written in Python 2
+@@ -187,6 +188,7 @@ def test_legacy_table_fixed_format_read_
+     tm.assert_frame_equal(expected, result)
++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)
+ def test_legacy_table_fixed_format_read_datetime_py2(datapath):
+     # GH 31750
+     # legacy table with fixed format and datetime64 column written in Python 2
+@@ -370,6 +372,7 @@ def test_read_hdf_series_mode_r(tmp_path
+ @pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")
+ @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)
+ def test_read_py2_hdf_file_in_py3(datapath):
+     # GH 16781
+--- a/pandas/tests/io/test_stata.py
++++ b/pandas/tests/io/test_stata.py
+@@ -34,6 +34,8 @@ from pandas.io.stata import (
+     read_stata,
+ )
++from pandas.compat import is_platform_little_endian
++pytestmark = pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of test_stata on non-little endian",strict=False)
+ @pytest.fixture
+ def mixed_frame():
+--- a/pandas/tests/io/pytables/test_timezones.py
++++ b/pandas/tests/io/pytables/test_timezones.py
+@@ -8,6 +8,7 @@ import pytest
+ from pandas._libs.tslibs.timezones import maybe_get_tz
+ import pandas.util._test_decorators as td
++from pandas.compat import is_platform_little_endian
+ import pandas as pd
+ from pandas import (
+@@ -312,6 +313,7 @@ def test_store_timezone(setup_path):
+         tm.assert_frame_equal(result, df)
++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)
+ def test_legacy_datetimetz_object(datapath):
+     # legacy from < 0.17.0
+     # 8260
+@@ -364,6 +366,7 @@ def test_read_with_where_tz_aware_index(
+     tm.assert_frame_equal(result, expected)
++@pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)
+ def test_py2_created_with_datetimez(datapath):
+     # The test HDF5 file was created in Python 2, but could not be read in
+     # Python 3.
+--- a/pandas/tests/io/test_common.py
++++ b/pandas/tests/io/test_common.py
+@@ -18,7 +18,7 @@ import tempfile
+ import numpy as np
+ import pytest
+-from pandas.compat import is_platform_windows
++from pandas.compat import is_platform_windows, is_platform_little_endian
+ import pandas.util._test_decorators as td
+ import pandas as pd
+@@ -305,11 +305,11 @@ Look,a snake,🐍"""
+                 "pyarrow",
+                 ("io", "data", "feather", "feather-0_3_1.feather"),
+             ),
+-            (
++            pytest.param(
+                 pd.read_hdf,
+                 "tables",
+                 ("io", "data", "legacy_hdf", "datetimetz_object.h5"),
+-            ),
++            marks=pytest.mark.xfail(condition=not is_platform_little_endian(),reason="known failure of hdf on non-little endian",strict=False,raises=AttributeError)),
+             (pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")),
+             (pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
+             (pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
+--- a/pandas/_testing/_warnings.py
++++ b/pandas/_testing/_warnings.py
+@@ -13,6 +13,7 @@ from typing import (
+     cast,
+ )
+ import warnings
++import platform
+ from pandas.compat import PY311
+@@ -187,6 +188,8 @@ def _assert_caught_no_extra_warnings(
+                 # pyproject.toml errors on EncodingWarnings in pandas
+                 # Ignore EncodingWarnings from other libraries
+                 continue
++            if (actual_warning.category==UserWarning and "Non-x86 system detected" in str(actual_warning.message) and not bool(re.match('i.?86|x86',platform.uname()[4]))) or (actual_warning.category==RuntimeWarning and "invalid value encountered" in str(actual_warning.message) and 'mips' in platform.uname()[4]):
++                continue
+             extra_warnings.append(
+                 (
+                     actual_warning.category.__name__,
diff --git a/python-pandas-doc.doc-base b/python-pandas-doc.doc-base
new file mode 100644 (file)
index 0000000..b29cae0
--- /dev/null
@@ -0,0 +1,7 @@
+Document: python3-pandas
+Title: pandas - powerful Python data analysis toolkit
+Section: Science/Data Analysis
+
+Format: HTML
+Index: /usr/share/doc/python-pandas-doc/html/index.html
+Files: /usr/share/doc/python-pandas-doc/html/*
diff --git a/python-pandas-doc.docs b/python-pandas-doc.docs
new file mode 100644 (file)
index 0000000..8137b04
--- /dev/null
@@ -0,0 +1 @@
+doc/build/html
diff --git a/python-pandas-doc.links b/python-pandas-doc.links
new file mode 100644 (file)
index 0000000..718f2af
--- /dev/null
@@ -0,0 +1,3 @@
+usr/share/doc/python-pandas-doc/html/whatsnew/index.html.gz usr/share/doc/python-pandas-doc/NEWS.html.gz
+usr/share/doc/python-pandas-doc/html/whatsnew/index.html.gz usr/share/doc/python3-pandas/NEWS.html.gz
+usr/share/javascript/mathjax/MathJax.js usr/share/doc/python-pandas-doc/html/_static/MathJax.js
diff --git a/rules b/rules
new file mode 100755 (executable)
index 0000000..bcd1f5d
--- /dev/null
+++ b/rules
@@ -0,0 +1,150 @@
+#!/usr/bin/make -f
+# -*- mode: makefile; coding: utf-8 -*-
+
+export DEB_BUILD_MAINT_OPTIONS = hardening=+all
+DPKG_EXPORT_BUILDFLAGS = 1
+include /usr/share/dpkg/buildflags.mk
+include /usr/share/dpkg/pkg-info.mk
+
+PY3VERS := $(shell py3versions -vr)
+PY3VER := $(shell py3versions -vd)
+SOURCE_DATE:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%a, %d %b %Y" || echo "xxx, xx xxx xxxx")
+SOURCE_TIME:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%T" || echo "xx:xx:xx")
+
+UVER := $(shell echo $(DEB_VERSION_UPSTREAM) | sed -e 's,+dfsg,,g')
+# Python doesn't use ~ for rc
+UVER_PY := $(shell echo $(UVER) | sed -e 's,[~],,g')
+UVER_PYSHORT := $(shell echo $(UVER_PY) | sed -e 's,+git.*,,g')
+
+# Filter out tests with "marker expressions" and "keyword expressions". Ref: pytest(1)
+ifeq ($(DEB_HOST_ARCH),$(filter $(DEB_HOST_ARCH), amd64 i386 kfreebsd-amd64 kfreebsd-i386 x32))
+       PYTEST_MARKER := not network
+else
+       PYTEST_MARKER := not network and not slow
+endif
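+# e.g. `-m "not network and not slow"` deselects tests marked with
+# @pytest.mark.network or @pytest.mark.slow (illustrative; see pytest(1))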
+ifeq (,$(findstring mips,$(DEB_HOST_ARCH))$(findstring hppa,$(DEB_HOST_ARCH))$(findstring _ia64_,_$(DEB_HOST_ARCH)_))
+       PYTEST_WARN_IGNORE :=
+else
+       # mips/hppa may have unusual NaN behaviour
+       # https://en.wikipedia.org/wiki/NaN#Encoding
+       # ia64 warns in test_groupby_quantile_all_na_group_masked (see 2.0.3+dfsg-6 build log)
+       PYTEST_WARN_IGNORE := -W "ignore:invalid value encountered:RuntimeWarning"
+endif
+
+# for matplotlib etc
+export HOME=$(CURDIR)/buildtmp
+
+# Split up the test suite to avoid running out of memory, use xvfb for plot
+# tests, and run but ignore potentially crashing tests.
+# test_register_entrypoint expects an installed package, test_wrong_url needs
+# internet, and test_statsmodels is a circular dependency when adding a new
+# Python version.
+# The echo of random characters allows searching the log for failures that
+# don't show the usual test failure message, i.e. crashes/errors.
+# -v makes the log too long for salsa-ci.
+# the grep -q is because trying to test __pycache__ is an error in pytest 8+, #1063959
+export PYBUILD_TEST_ARGS=TEST_SUCCESS=true; cd {build_dir} ; for TEST_SUBSET in {build_dir}/pandas/tests/* ; do echo $$TEST_SUBSET | grep -q -e __pycache__ || PANDAS_CI=1 LC_ALL=C.UTF-8 xvfb-run -a -s "-screen 0 1280x1024x24 -noreset" {interpreter} -m pytest -s -m "$(PYTEST_MARKER)" $(PYTEST_WARN_IGNORE) -k "not test_register_entrypoint and not test_wrong_url and not test_statsmodels" --confcutdir={build_dir}/pandas --deb-data-root-dir={dir}/pandas/tests $$TEST_SUBSET || test $$? = 5 || TEST_SUCCESS=false && echo "rdjoqkol test state = $$TEST_SUCCESS"; done ; rm -rf test-data.xml test_stata.dta .pytest_cache ; $$TEST_SUCCESS
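+# Roughly, the one-liner above does the following (a hedged sketch; pybuild's
+# {build_dir}/{interpreter} placeholders and most pytest options are elided):
+#   TEST_SUCCESS=true
+#   for TEST_SUBSET in pandas/tests/*; do
+#       pytest -m "$(PYTEST_MARKER)" $TEST_SUBSET || test $? = 5 || TEST_SUCCESS=false
+#   done
+#   $TEST_SUCCESS   # pytest exit status 5 means "no tests collected", not a failure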
+
+export PYBUILD_EXT_DESTDIR=debian/python3-pandas-lib
+export PYBUILD_DESTDIR=debian/python3-pandas
+
+# try to prevent unsanctioned downloads
+export  http_proxy=http://127.0.0.1:9/
+export  https_proxy=http://127.0.0.1:9/
+
+export SHELL=/bin/bash
+
+# Mega rule
+%:
+       : # Explicit build system to avoid use of all-in-1 Makefile
+       dh $@ --buildsystem=pybuild --with python3,numpy3,sphinxdoc
+
+# The *cython* rules are provided to allow using pre-built Cython files in distributions without a new enough Cython; they are not (and must not be) used in official Debian builds (including -backports).  Use instructions:
+# on a system with new enough Cython, run debian/rules cythonize - this will add a directory under debian/
+# uncomment the _uncythonize rule dependency below
+# remove the cython3 Build-Depends from d/control
+# copy to the older system and build as normal
+# To upgrade to a new upstream version, this process must be repeated
+# Warning - has not been tested for some time
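+# A plausible sequence (hedged; per the warning above, this path is untested):
+#   debian/rules cythonize        # on the system with new enough Cython
+#   # copy the tree, including debian/cythonized-files3/, to the older system
+#   dpkg-buildpackage -us -uc     # then build as normal on the older system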
+_cythonize%: override_dh_clean # force removal of previous copies
+       python$(*:2=) setup.py cython
+       D=debian/cythonized-files$(*:2=) && \
+               git rm -rf $$D || rm -rf $$D; \
+               find pandas/ -regex '.*\.c\(\|pp\)' | while read f; do \
+                       grep -q 'Generated by Cython' "$$f" || continue; \
+                       mkdir -p "$$D/$$(dirname $$f)"; \
+                       cp "$$f" "$$D/$$(dirname $$f)"; \
+                       git add -f "$$D/$$f" || true; \
+               done; \
+               echo "$(UVER)" >| $$D/VERSION; git add $$D/VERSION || true
+
+_uncythonize%:
+       : # Make sure that cythonized sources are up-to-date
+       [ "$(UVER)" = "`cat debian/cythonized-files3/VERSION`" ]
+       echo "$*" | grep -q '^3' && PY=3 || PY= ; \
+               echo "I: Using pre-Cython-ed files for Python $*"; \
+               cd debian/cythonized-files$$PY/ ; \
+               find . -regex '.*\.c\(\|pp\)' | while read f; do cp $$f ../../$$f; done
+
+cythonize: _cythonize3
+
+override_dh_clean:
+       find pandas/ -regex '.*\.c\(\|pp\)' | xargs grep -l -e 'Generated by Cython'  | xargs -r rm -f
+       # the || true is because this will fail without docutils (-arch or nodoc builds)
+       cd doc && LC_ALL=C.UTF-8 python3 make.py clean || true
+       rm -rf build buildtmp *-stamp doc/source/savefig doc/source/user_guide/styled.xlsx doc/source/index.rst
+       dh_clean
+
+override_dh_auto_build-arch: # ${PY3VERS:%=_uncythonize%}
+       mkdir -p buildtmp
+       [ -e pandas/__version.py ] || \
+       echo -e "version = '$(UVER_PY)'\nshort_version = '$(UVER_PYSHORT)'" > pandas/__version.py
+       dh_auto_build
+
+override_dh_auto_build-indep: override_dh_auto_build-arch
+ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS)))
+       : # Build Documentation
+       cd doc && PYTHONPATH=$(CURDIR)/.pybuild/cpython3_$(PY3VER)/build LC_ALL=C.UTF-8 LANGUAGE=C.UTF-8 python3 make.py html
+       # strip build paths, object IDs and statsmodels timestamps for reproducibility, and online Javascript for privacy
+       for html in `find doc/build/html -name _modules -prune -o -name "*.html" -o -name "*.ipynb"` ; do \
+           sed -i -e 's#$(CURDIR)/.pybuild/[^/]*/build/pandas/#/usr/lib/python3/dist-packages/pandas/#g' \
+           -e 's#$(CURDIR)/debian/python3-pandas/usr/lib/python3/dist-packages/pandas/#/usr/lib/python3/dist-packages/pandas/#g' \
+           -e 's# at 0x[0-9a-f]\{8,16\}\(&gt;\|>\)# at 0xadde5de1e8ed\1#g' \
+           -e 's#\(Date:.*\)[A-Z][a-z]\+, \+[0-9]\+,\? \+[A-Z][a-z]\+,\? \+[0-9]\+#\1$(SOURCE_DATE)#g' \
+           -e 's#\(Time:.*\)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]#\1$(SOURCE_TIME)#g' $${html} ; \
+       done
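+# (e.g. "<function f at 0x7f3a2c1b9d30>" becomes "<function f at 0xadde5de1e8ed>",
+# keeping rebuilt pages byte-identical; "f" is a hypothetical name)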
+endif
+
+override_dh_installdocs:
+       dh_installdocs -A *.md
+       # for NEWS.html.gz - put it here and use symlinks to avoid breaking internal links
+       gzip -c -n -9 debian/python-pandas-doc/usr/share/doc/python-pandas-doc/html/whatsnew/index.html > debian/python-pandas-doc/usr/share/doc/python-pandas-doc/html/whatsnew/index.html.gz || true
+       cp -av debian/contributors_list.txt debian/python-pandas-doc/usr/share/doc/python-pandas-doc || true
+       # deduplicate files - the ||true is because we only build-depend on jdupes if we're building documentation
+       jdupes -r -l debian/python-pandas-doc/usr/share/doc || true
+
+
+## immediately useable documentation and exemplar scripts/data
+override_dh_compress:
+       dh_compress -X.py -X.html -X.pdf -X.css -X.jpg -X.txt -X.js -X.json -X.rtc -Xobjects.inv
+
+# see PYBUILD_TEST_ARGS above
+override_dh_auto_test:
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+       PYBUILD_SYSTEM=custom dh_auto_test
+endif
diff --git a/salsa-ci.yml b/salsa-ci.yml
new file mode 100644 (file)
index 0000000..44b3fd4
--- /dev/null
@@ -0,0 +1,15 @@
+---
+include:
+  - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml
+  - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml
+# known not currently reproducible
+reprotest:
+  allow_failure: true
+variables:
+  SALSA_CI_LINTIAN_FAIL_WARNING: 1
+# clearer errors from reproducibility test
+  SALSA_CI_REPROTEST_ENABLE_DIFFOSCOPE: 1
+# tests are long, so run them only in build
+# shorten the build logs to make room for reporting differences
+  SALSA_CI_REPROTEST_ARGS: "--append-build-command=-Pnocheck --vary=environment.variables+=DEB_BUILD_OPTIONS=terse"
+#  SALSA_CI_DISABLE_AUTOPKGTEST: 1
diff --git a/source/format b/source/format
new file mode 100644 (file)
index 0000000..163aaf8
--- /dev/null
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/source/lintian-overrides b/source/lintian-overrides
new file mode 100644 (file)
index 0000000..e2653a0
--- /dev/null
@@ -0,0 +1,5 @@
+# long lines in test data are not a bug
+very-long-line-length-in-source-file * [pandas/tests/io/data/*]
+very-long-line-length-in-source-file * [pandas/tests/io/sas/data/*]
+# long lines also don't equal actually missing source
+source-is-missing [pandas/tests/io/data/html/*]
diff --git a/source/options b/source/options
new file mode 100644 (file)
index 0000000..e558c0f
--- /dev/null
@@ -0,0 +1,3 @@
+extend-diff-ignore="^[^/]+\.egg-info/|pandas/__version.py"
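+# (this keeps dpkg-source from recording egg-info metadata and the regenerated
+# pandas/__version.py as changes to the upstream tree)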
diff --git a/tests/control b/tests/control
new file mode 100644 (file)
index 0000000..80c1d53
--- /dev/null
@@ -0,0 +1,122 @@
+# Check that the hard Depends are enough for import
+# cd to not-the-source-directory to get the installed pandas
+Test-Command: cd "$AUTOPKGTEST_TMP" && python3 -c "import pandas;a=pandas.DataFrame([[1,2],[3,4]])"
+Depends: python3-pandas
+Restrictions: allow-stderr
+
+# According to pandas/doc/source/install.rst, running the unit tests looks like:
+# `py.test-3 --skip-slow --skip-network /usr/lib/python3/dist-packages/pandas/ -v -rs`
+# Or simply `python3 -c "import pandas as pd; pd.test()"`, which doesn't require
+# us to specify the path (pandas.__path__) in command line.
+# See: pandas/util/_tester.py
+Tests: unittests3
+# xml test_wrong_url fails without ca-certificates
+Depends: python3-pandas,
+         ca-certificates,
+         locales-all,
+         python3-all,
+# python3-blosc is not actually used (blosc is used through python3-tables instead)
+         python3-bottleneck (>= 1.3.6~),
+         python3-bs4 (>= 4.11.2~),
+         python3-dask (>= 2023.2.0~),
+         python3-dateutil,
+         python3-fsspec (>= 2022.11.0~),
+         python3-html5lib (>= 1.1~),
+         python3-hypothesis (>= 6.46.1~),
+         python3-jinja2 (>= 3.1.2~),
+         python3-lxml (>= 4.9.2~),
+         python3-matplotlib (>= 3.6.3~) [!ia64 !sh4 !x32],
+# architectures here are the ones on which to treat numba-related failures as RC - see also debian/control
+# temporarily disabled as numba is not in testing #1033907         python3-numba (>= 0.56.4~) [amd64 i386 ppc64el],
+         python3-numexpr (>= 2.8.4~),
+         python3-numpy (>= 1:1.23.2~),
+         python3-odf (>= 1.4.1~),
+         python3-openpyxl (>= 3.1.0~),
+# doesn't seem to work in this test environment         python3-psycopg2 (>= 2.9.6~),
+         python3-py,
+# doesn't seem to work in this test environment         python3-pymysql (>= 1.0.2~),
+         python3-pyqt5 (>= 5.15.9~),
+         python3-pyreadstat (>= 1.2.0~),
+         python3-pytest (>= 7.3.2~),
+         python3-pytest-asyncio (>= 0.17~),
+         python3-pytest-forked,
+         python3-pytest-localserver,
+         python3-pytest-xdist (>= 2.2.0~),
+         python3-pytestqt (>= 4.2.0~),
+# we don't have python3-pyxlsb
+         python3-scipy (>= 1.10.0~),
+         python3-setuptools (>= 51~),
+         python3-sqlalchemy (>= 2.0.0~),
+# python3-tables is now little-endian only, and also unavailable on some ports
+         python3-tables (>= 3.8.0~) [!s390x !hppa !powerpc !ppc64 !sparc64 !hurd-any !alpha],
+         python3-tabulate (>= 0.9.0~),
+         python3-tk,
+         python3-tz (>= 2022.7~),
+         python3-xarray (>= 2022.12.0~),
+         python3-xlrd (>= 2.0.1~),
+         python3-xlsxwriter (>= 3.0.5~),
+         python3-zstandard (>= 0.19.0~),
+         tzdata-legacy,
+         xauth,
+         xvfb,
+         xsel
+Restrictions: allow-stderr, needs-internet
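+# (a hedged local-run sketch: autopkgtest pandas_*.dsc -- schroot sid-amd64,
+#  assuming such a schroot exists; any autopkgtest virt backend should work)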
+
+# this test does not actually fail on failure - it exists to produce a log for monitoring known issues
+# (we can't use plain xfails for this, because some of these tests crash rather than merely fail)
+Tests: ignoredtests
+# identical dependencies to above except that:
+# numba is required on all architectures
+# psycopg2/pymysql are included
+Depends: python3-pandas,
+         ca-certificates,
+         locales-all,
+         python3-all,
+# python3-blosc is not actually used (blosc is used through python3-tables instead)
+         python3-bottleneck (>= 1.3.6~),
+         python3-bs4 (>= 4.11.2~),
+         python3-dask (>= 2023.2.0~),
+         python3-dateutil,
+         python3-fsspec (>= 2022.11.0~),
+         python3-html5lib (>= 1.1~),
+         python3-hypothesis (>= 6.46.1~),
+         python3-jinja2 (>= 3.1.2~),
+         python3-lxml (>= 4.9.2~),
+         python3-matplotlib (>= 3.6.3~) [!ia64 !sh4 !x32],
+# architectures here are the ones on which to treat numba-related failures as RC - see also debian/control
+# temporarily disabled as numba is not in testing #1033907         python3-numba (>= 0.56.4~),
+         python3-numexpr (>= 2.8.4~),
+         python3-numpy (>= 1:1.23.2~),
+         python3-odf (>= 1.4.1~),
+         python3-openpyxl (>= 3.1.0~),
+         python3-psycopg2 (>= 2.9.6~),
+         python3-py,
+         python3-pymysql (>= 1.0.2~),
+         python3-pyqt5 (>= 5.15.9~),
+         python3-pyreadstat (>= 1.2.0~),
+         python3-pytest (>= 7.3.2~),
+         python3-pytest-asyncio (>= 0.17~),
+         python3-pytest-forked,
+         python3-pytest-localserver,
+         python3-pytest-xdist (>= 2.2.0~),
+         python3-pytestqt (>= 4.2.0~),
+# we don't have python3-pyxlsb
+         python3-scipy (>= 1.10.0~),
+         python3-setuptools (>= 51~),
+         python3-sqlalchemy (>= 2.0.0~),
+# python3-tables is now little-endian only, and also unavailable on some ports
+         python3-tables (>= 3.8.0~) [!s390x !hppa !powerpc !ppc64 !sparc64 !hurd-any !alpha],
+         python3-tabulate (>= 0.9.0~),
+         python3-tk,
+         python3-tz (>= 2022.7~),
+         python3-xarray (>= 2022.12.0~),
+         python3-xlrd (>= 2.0.1~),
+         python3-xlsxwriter (>= 3.0.5~),
+         python3-zstandard (>= 0.19.0~),
+         tzdata-legacy,
+         xauth,
+         xvfb,
+         xsel
+Restrictions: allow-stderr, needs-internet
diff --git a/tests/ignoredtests b/tests/ignoredtests
new file mode 100755 (executable)
index 0000000..fbd2e50
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/bash
+# this intentionally doesn't actually fail (isn't set -e), as it exists to run known possibly-crashing tests to get a log of whether they still fail
+
+arch=$(dpkg --print-architecture)
+pys="$(py3versions -s 2>/dev/null)"
+sourcetestroot="$PWD/pandas/tests"
+
+cd "$AUTOPKGTEST_TMP"
+TEST_SUCCESS=true
+for py in $pys; do
+       echo "=== $py ==="
+       modpath=$($py -c 'import pandas as pd; print(pd.__path__[0])')
+       echo "tests that use numba (may crash on non-x86); list found with: grep -rl -e numba pandas/tests; -m 'not slow' is used because there are enough tests to time out otherwise"
+    PANDAS_CI=1 LC_ALL=C.UTF-8 xvfb-run --auto-servernum --server-args="-screen 0 1024x768x24" $py -m pytest --forked --tb=long -s -m "not slow" --deb-data-root-dir=$sourcetestroot --confcutdir=$modpath $modpath/tests/frame/test_ufunc.py $modpath/tests/groupby/test_numba.py $modpath/tests/groupby/test_timegrouper.py $modpath/tests/groupby/transform/test_numba.py $modpath/tests/groupby/aggregate/test_numba.py $modpath/tests/util/test_numba.py $modpath/tests/window $TEST_SUBSET 2>&1
+    echo "tests with a run=False xfail for hdf5 crashes - see xfail_tests_nonintel_io.patch"
+    PANDAS_CI=1 LC_ALL=C.UTF-8 xvfb-run --auto-servernum --server-args="-screen 0 1024x768x24" $py -m pytest --forked --runxfail --tb=long -s --deb-data-root-dir=$sourcetestroot --confcutdir=$modpath $modpath/tests/io/pytables/test_file_handling.py $modpath/tests/io/pytables/test_append.py $modpath/tests/io/pytables/test_store.py
+    echo "pymysql/psycopg2 tests, which do not work in this test environment"
+    PANDAS_CI=1 LC_ALL=C.UTF-8 xvfb-run --auto-servernum --server-args="-screen 0 1024x768x24" $py -m pytest --forked --tb=long -s --deb-data-root-dir=$sourcetestroot --confcutdir=$modpath $modpath/tests/io/test_sql.py $modpath/tests/tools/test_to_datetime.py
+done
+# this intentionally doesn't actually fail, as it exists to run known possibly-crashing tests to get a log of whether they still fail
+true
diff --git a/tests/unittests3 b/tests/unittests3
new file mode 100755 (executable)
index 0000000..923ec81
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -eu
+set -x
+
+arch=$(dpkg --print-architecture)
+pys="$(py3versions -s 2>/dev/null)"
+sourcetestroot="$PWD/pandas/tests"
+# some tests _require_ the treat-warnings-as-errors set here
+# (as they use pytest.raises to catch what would normally be a warning)
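+# (hypothetical illustration: a test doing
+#    with pytest.raises(FutureWarning): frame.some_deprecated_op()
+#  only passes when filterwarnings turns that warning into an error)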
+tomlfile="$PWD/pyproject.toml"
+
+# Debian: Enable "slow" tests on x86 to keep the code coverage.
+# Ubuntu: Disable "slow" tests on ALL architectures.
+if (echo amd64 i386 | grep "$arch" >/dev/null) && [ "Debian" = "$(dpkg-vendor --query vendor)" ]; then
+       marker=''
+else
+       marker='not slow'
+fi
+# mips/hppa may have unusual NaN behaviour
+# https://en.wikipedia.org/wiki/NaN#Encoding
+if (echo "$arch" | grep -E "mips|hppa" >/dev/null) ; then
+       PYTEST_WARN_IGNORE="-W ignore:invalid value encountered:RuntimeWarning"
+else
+       PYTEST_WARN_IGNORE=
+fi
+
+cd "$AUTOPKGTEST_TMP"
+# Run in sections to avoid out-of-memory crash (#943732)
+# exit code 5 means no tests in this file
+# the grep -q is because trying to test __pycache__ is an error in pytest 8+, #1063959
+TEST_SUCCESS=true
+for py in $pys; do
+       echo "=== $py ==="
+       modpath=$($py -c 'import pandas as pd; print(pd.__path__[0])')
+       for TEST_SUBSET in $modpath/tests/* ; do
+        echo $TEST_SUBSET | grep -q -e __pycache__ || PANDAS_CI=1 LC_ALL=C.UTF-8 xvfb-run --auto-servernum --server-args="-screen 0 1024x768x24" \
+        $py -m pytest --tb=long -s -m "$marker" $PYTEST_WARN_IGNORE -c $tomlfile --deb-data-root-dir=$sourcetestroot --rootdir=$modpath $TEST_SUBSET 2>&1 || test $? = 5 || TEST_SUCCESS=false && echo "rdjoqkol test state = $TEST_SUCCESS"
+    done
+done
+$TEST_SUCCESS
diff --git a/upstream/metadata b/upstream/metadata
new file mode 100644 (file)
index 0000000..5b32084
--- /dev/null
@@ -0,0 +1,14 @@
+Bug-Submit: https://github.com/pandas-dev/pandas/issues/new
+Repository: https://github.com/pandas-dev/pandas.git
+Documentation: https://pandas.pydata.org/pandas-docs/stable
+Bug-Database: https://github.com/pandas-dev/pandas/issues
+Contact: https://pandas.pydata.org/community.html
+Reference:
+  Title: "pandas: a Foundational Python Library for Data Analysis and Statistics"
+  Eprint: https://www.scribd.com/doc/71048089/pandas-a-Foundational-Python-Library-for-Data-Analysis-and-Statistics
+  Author: McKinney, Wes
+  Booktitle: presented at PyHPC
+  Year: 2011
+Other-References: https://pandas.pydata.org/talks.html
+Repository-Browse: https://github.com/pandas-dev/pandas
+Security-Contact: https://github.com/pandas-dev/pandas/tree/HEAD/.github/SECURITY.md
diff --git a/watch b/watch
new file mode 100644 (file)
index 0000000..706c551
--- /dev/null
+++ b/watch
@@ -0,0 +1,5 @@
+version=4
+opts="mode=git,gitexport=all,dversionmangle=s/.dfsg[0-9]*$//,uversionmangle=s/v//;s/rc/~rc/,repacksuffix=+dfsg" \
+ https://github.com/pandas-dev/pandas refs/tags/v([0-9.]+)
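+# a hedged check: running `uscan --report --verbose` should match upstream tags
+# like v2.2.3, mangle them to 2.2.3, and repack with the +dfsg suffix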