--- /dev/null
+pandas (2.2.3+dfsg-5) unstable; urgency=medium
+
+ * Docs: switch back to pydata-sphinx-theme.
+ * Docs: (re-)add build-dependencies we now can satisfy.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 21 Oct 2024 19:43:11 +0100
+
+pandas (2.2.3+dfsg-4) unstable; urgency=medium
+
+ * Re-enable docs using a different theme (workaround for #1084781).
+ * Re-add dropped build-depends.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 11 Oct 2024 10:18:43 +0100
+
+pandas (2.2.3+dfsg-3) unstable; urgency=medium
+
+ * Ignore docs build fail (workaround for #1084781) and print log.
+ * Docs: remove old pydata-sphinx-theme workarounds.
+ * Temporarily drop some build-depends to break bootstrap cycle.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 08 Oct 2024 22:14:45 +0100
+
+pandas (2.2.3+dfsg-2) unstable; urgency=medium
+
+ * Move pyreadstat to Build-Depends-Indep to break cycle.
+ * Allow building on ports without full test-Depends.
+ * Re-enable sqlalchemy tests.
+ * Stop using pkg_resources. (Closes: #1083523)
+ * Tests: stop passing non-constant pytz.timezone
+ directly to datetime.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 06 Oct 2024 16:40:59 +0100
+
+pandas (2.2.3+dfsg-1) unstable; urgency=medium
+
+ * New upstream release. (Closes: #1082096)
+ * Update contributors, refresh patches.
+ * Tests: keep 32-bit intervaltree xfailed.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 21 Sep 2024 17:21:40 +0100
+
+pandas (2.2.2+dfsg-4) unstable; urgency=medium
+
+ * Tests: re-enable bottleneck and tabulate (see #1070359, #1070360),
+ make blosc xfail nonstrict, use pyproject.toml in autopkgtest,
+ run autopkgtest in CI, be less verbose to fit in the CI log.
+ * Add transition Breaks.
+ * Upload to unstable. (Closes: #1069792)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 07 Jul 2024 19:36:37 +0100
+
+pandas (2.2.2+dfsg-3) experimental; urgency=medium
+
+ * Tests: add forgotten import.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 06 May 2024 14:47:52 +0100
+
+pandas (2.2.2+dfsg-2) experimental; urgency=medium
+
+ * Allow importing pandas._testing without pytest.
+ * Tests: don't require 32-bit to imply time32.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 06 May 2024 11:29:54 +0100
+
+pandas (2.2.2+dfsg-1) experimental; urgency=medium
+
+ * New upstream release. Update copyright, patches, depends.
+ * Upload to experimental, due to potential breakage (see #1069792).
+ * Tests: use our test data path, skip too-old dependencies,
+ mark some tests as requiring optional dependencies,
+ remove no longer needed patches, clean up afterwards.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 05 May 2024 14:40:45 +0100
+
+pandas (2.1.4+dfsg-8) unstable; urgency=medium
+
+ * Re-enable the documentation.
+ * Bump Standards-Version to 4.7.0 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 21 Apr 2024 13:50:13 +0100
+
+pandas (2.1.4+dfsg-7) unstable; urgency=medium
+
+ * Tests: don't require 32-bit to imply time32. (Closes: #1068104)
+ * Temporarily disable the documentation (workaround for #1068349).
+ * Tests: temporarily ignore dask tests (workaround for #1068422).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 05 Apr 2024 22:44:46 +0100
+
+pandas (2.1.4+dfsg-6) unstable; urgency=medium
+
+ * Tests: try again to stop testing __pycache__. (Closes: #1063959)
+ * Tests: avoid FutureWarning from newer xarray. (Closes: #1066801)
+ * Drop pytables test-depends (skipping tests) on architectures
+ where it is not available. (Closes: #1064384)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 16 Mar 2024 17:11:33 +0000
+
+pandas (2.1.4+dfsg-5) unstable; urgency=medium
+
+ * Tests: stop trying to test __pycache__. (Closes: #1063959)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 18 Feb 2024 20:31:18 +0000
+
+pandas (2.1.4+dfsg-4) unstable; urgency=medium
+
+ * Tests: shorten ignoredtests to avoid timeout.
+ * Temporarily skip numba tests (workaround for #1033907).
+ * Update transition Breaks.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 09 Feb 2024 20:48:14 +0000
+
+pandas (2.1.4+dfsg-3) unstable; urgency=medium
+
+ * Add more transition Breaks (see #1043240).
+ * Upload to unstable. (Closes: #1056828)
+ * Tests: don't fail when (random) sum test input sums to near-0,
+ use our paths, depend on pytest-localserver, skip broken test.
+ * Docs: re-enable style.ipynb.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 01 Feb 2024 07:52:49 +0000
+
+pandas (2.1.4+dfsg-2) experimental; urgency=medium
+
+ * Fix autopkgtest syntax error.
+ * Add more transition Breaks.
+ * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*.
+ * Tests: be compatible with current blosc2. (Closes: #1061043)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 21 Jan 2024 18:44:54 +0000
+
+pandas (2.1.4+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release. Update contributors_list and patches.
+ * Add transition Breaks.
+ * Temporarily disable style.ipynb (workaround for #1057309).
+ * Tests: ignore some DeprecationWarnings and pytables exceptions
+ to allow building in Python 3.12 (see #1055801).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 08 Dec 2023 22:06:00 +0000
+
+pandas (2.1.3+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release. Update contributors_list and patches.
+ * Re-enable numexpr for **, as upstream have decided to accept
+ its different overflow behaviour.
+ * Tests: fix autopkgtest syntax errors.
+ * Allow building on ports without pyreadstat or matplotlib.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 12 Nov 2023 21:53:27 +0000
+
+pandas (2.1.1+dfsg-2) experimental; urgency=medium
+
+ * Tests: don't fail when xlsxwriter is not installed.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 13 Oct 2023 19:43:13 +0100
+
+pandas (2.1.1+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release. Update contributors_list and patches.
+ * Don't test-depend on tabulate, because our version is too old.
+ * Extend find_test_data to cover a new fixture.
+ * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 11 Oct 2023 07:40:12 +0100
+
+pandas (2.1.0+dfsg-1) experimental; urgency=medium
+
+ * New upstream release. Update copyright, patches, dependencies.
+ * Tests: try again to ignore 1**NaN, NaN**0 NaNs on mips*,
+ ignore warnings on Itanium,
+ run with fixed locale set, skip not fail without matplotlib.
+ * Docs: fix bug in previous reproducibility patch.
+ * d/watch: Use git, as the tarball now excludes docs and test data.
+ * Don't crash in setup.py clean.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 03 Sep 2023 22:06:46 +0100
+
+pandas (2.0.3+dfsg-6) experimental; urgency=medium
+
+ * Tests: try again to ignore NaN warnings on mips*.
+ * Build on ports without sqlalchemy or working matplotlib.
+ * Reproducibility: strip object addresses,
+ disable some timestamps and random IDs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 27 Aug 2023 11:17:44 +0100
+
+pandas (2.0.3+dfsg-5) experimental; urgency=medium
+
+ * Fix SAS I/O crash on armhf.
+ * Clean up after documentation build.
+ * Tests: ignore NaN warnings and 1**NaN, NaN**0 NaNs on mips*.
+ * Docs: use fixed seeds for reproducibility.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 25 Aug 2023 21:26:09 +0100
+
+pandas (2.0.3+dfsg-4) experimental; urgency=medium
+
+ * Ignore numba errors on most non-x86 systems (workaround
+ for #1033907). This already warns the user.
+ * Run but ignore some previously skipped tests.
+ * Tests: revert "don't use : in numexpr variable names"
+ as that wasn't actually the problem.
+ * Tests: ignore another non-warning on armel.
+ * Fix spelling.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 20 Aug 2023 09:26:03 +0100
+
+pandas (2.0.3+dfsg-3) experimental; urgency=medium
+
+ * Don't use numexpr for **, as it has different overflow behaviour.
+ * Tests: don't use : in numexpr variable names.
+ * Fix missing import on arm*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 17 Aug 2023 19:51:45 +0100
+
+pandas (2.0.3+dfsg-2) experimental; urgency=medium
+
+ * Clean up after tests.
+ * Don't build in bare C locale.
+ * Docs: make more plots reproducible.
+ * Remove the warning that results may be wrong on mips*,
+ as this appears to have been fixed.
+ * Tests: don't fail on warnings we added.
+ * Remove unnecessary HDF/Stata I/O warnings.
+ * Depend on system tzdata not PyPI tzdata (see #1043968).
+ Use tzdata-legacy where old-style timezone names are used.
+ * Tests: re-enable dask tests (see #1043093).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 13 Aug 2023 21:55:15 +0100
+
+pandas (2.0.3+dfsg-1) experimental; urgency=medium
+
+ * New upstream release. Drop/refresh patches, update dependencies.
+ * Upload to experimental, due to potential breakage.
+ * Add dask Breaks and disable tests (see #1043093).
+ * Tests: re-enable numba tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 06 Aug 2023 11:02:39 +0100
+
+pandas (1.5.3+dfsg-4) unstable; urgency=medium
+
+ * Docs: allow building with Sphinx 7. (Closes: #1042672)
+ * Remove unused python3-six dependencies. (Closes: #1039441)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 30 Jul 2023 22:34:06 +0100
+
+pandas (1.5.3+dfsg-3) unstable; urgency=medium
+
+ * Tests: don't fail with fsspec 2023. (Closes: #1042043)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 26 Jul 2023 07:57:11 +0100
+
+pandas (1.5.3+dfsg-2) unstable; urgency=medium
+
+ * Tests: use a non-backzone timezone. (Closes: #1031437)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 19 Feb 2023 11:01:48 +0000
+
+pandas (1.5.3+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Update copyright and patches.
+ * Tests: ignore plot rounding errors. (Closes: #1029251)
+ * Tests: re-enable dask test, ignore numpy 1.24 warning.
+ * Docs: re-enable style.ipynb.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 22 Jan 2023 11:54:30 +0000
+
+pandas (1.5.2+dfsg-6) unstable; urgency=medium
+
+ * Move xarray to Build-Depends-Indep to break circular dependency.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 11 Jan 2023 07:34:28 +0000
+
+pandas (1.5.2+dfsg-5) unstable; urgency=medium
+
+ * Fix or ignore warnings from numpy 1.24.
+ * Stop ignoring tests on mips*, thanks to Adrian Bunk.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 10 Jan 2023 20:48:34 +0000
+
+pandas (1.5.2+dfsg-4) unstable; urgency=medium
+
+ * Add dask transition Breaks (see #1025393).
+ * Don't try to load intersphinx links from python-numpy-doc,
+ as it no longer exists.
+ * Upload to unstable. (Closes: #1023965, #1022571)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 09 Jan 2023 19:45:45 +0000
+
+pandas (1.5.2+dfsg-3) experimental; urgency=medium
+
+ * Tests: ignore a numpy warning.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 07 Jan 2023 10:09:11 +0000
+
+pandas (1.5.2+dfsg-2) experimental; urgency=medium
+
+ * Fix date_range overflow in 32-bit. (Closes: #1026351)
+ * Don't try to load intersphinx links from python-matplotlib-doc,
+ as it no longer exists. (Closes: #1027576)
+ * Re-enable parso-using tests (as #1023896 has been fixed).
+ * Bump Standards-Version to 4.6.2 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 06 Jan 2023 21:36:03 +0000
+
+pandas (1.5.2+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release. Update contributors_list.
+ * Fix Lintian typo.
+ * Xfail parso-using tests (workaround for #1023896).
+ * Temporarily drop numba test-depends (skips tests),
+ as it is uninstallable due to #1024795.
+ * Add transition Breaks (see #1022571).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 30 Nov 2022 21:48:47 +0000
+
+pandas (1.3.5+dfsg-6) unstable; urgency=medium
+
+ * Team upload
+ * Ignore test failures for first build with Python 3.11,
+ see #1023965
+
+ -- Graham Inggs <ginggs@debian.org> Sun, 13 Nov 2022 10:36:51 +0000
+
+pandas (1.5.1+dfsg-3) experimental; urgency=medium
+
+ * Revert minimum Cython version.
+ * Tests: fix another little-endian assumption.
+ * Silence some Lintian messages.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 10 Nov 2022 21:36:53 +0000
+
+pandas (1.5.1+dfsg-2) experimental; urgency=medium
+
+ * Tests: xfail rolling.var/std rounding error on i386,
+ don't assume little-endian,
+ re-disable some SQL tests our setup blocks.
+ * Temporarily lower minimum Cython version (see LP#1995992).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 09 Nov 2022 21:17:44 +0000
+
+pandas (1.5.1+dfsg-1) experimental; urgency=medium
+
+ * New upstream release. Update copyright and patches.
+ * Include more of the optional test-depends, for more test coverage.
+ * Update minimum dependency versions.
+ * Docs: update Sphinx extension dependencies,
+ use pydata theme but disable analytics (for privacy) and
+ features requiring dependencies we don't have.
+ * Tests: use the upstream mechanism to skip non-CI-friendly
+ (e.g. excessively resource-heavy) tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 07 Nov 2022 21:02:49 +0000
+
+pandas (1.4.3+dfsg-6) experimental; urgency=medium
+
+ * Fix NaT bug introduced by previous patch.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 24 Aug 2022 22:01:26 +0100
+
+pandas (1.4.3+dfsg-5) experimental; urgency=medium
+
+ * Fix bounds checking in float-to-datetime conversion.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 23 Aug 2022 22:24:15 +0100
+
+pandas (1.4.3+dfsg-4) experimental; urgency=medium
+
+ * Tests: skip another s390x numba issue, new riscv64 date error test.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 22 Aug 2022 22:25:08 +0100
+
+pandas (1.4.3+dfsg-3) experimental; urgency=medium
+
+ * Don't warn on non-x86 NaN->datetime cast,
+ as this has been fixed in numpy (#877754).
+ * Tests: fix architecture detection, skip another s390x numba crash,
+ extend hurd_compat.patch, more output around riscv64 date errors.
+ * Add transition Breaks (see #1017809).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 21 Aug 2022 18:13:03 +0100
+
+pandas (1.4.3+dfsg-2) experimental; urgency=medium
+
+ * Tests: don't assume little-endian,
+ don't try to cast pytz version to float,
+ ignore a missing numpy warning on armel.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 20 Aug 2022 10:32:58 +0100
+
+pandas (1.4.3+dfsg-1) experimental; urgency=medium
+
+ * New upstream release. Update copyright and patches.
+ * Upload to experimental, due to potential breakage.
+ * Remove xlwt Recommends.
+ * Update build/test-Depends.
+ * Re-enable numba (see #1000336).
+ * Update README.source.
+ * Update Lintian override format (see #1007002).
+ * Tests: add a searchable failure message to crashes/errors,
+ be less verbose so the log fits in the Salsa CI size limit,
+ remove a now-useless xlrd test, xfail some tests on 32-bit.
+ * Bump Standards-Version to 4.6.1 (no changes needed).
+ * Show the numba non-x86 warning on more (all?) numba uses.
+ * Drop stable_test_urls.patch.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 27 Jul 2022 22:17:33 +0100
+
+pandas (1.3.5+dfsg-5) unstable; urgency=medium
+
+ * Fix FTBFS with Sphinx 5. (Closes: #1013375)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 26 Jun 2022 15:06:07 +0100
+
+pandas (1.3.5+dfsg-4) unstable; urgency=medium
+
+ * Temporarily skip numba tests. (Closes: #1008179)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 25 Mar 2022 20:57:26 +0000
+
+pandas (1.3.5+dfsg-3) unstable; urgency=medium
+
+ * Tests: be compatible with new fsspec. (Closes: #1006170)
+ * Re-enable numba tests.
+ * Fix pyversions call.
+ * Enable Salsa CI.
+ * Update Lintian overrides.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 21 Feb 2022 07:35:51 +0000
+
+pandas (1.3.5+dfsg-2) unstable; urgency=medium
+
+ * Temporarily skip numba tests (see LP#1951814).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 28 Jan 2022 19:22:53 +0000
+
+pandas (1.3.5+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Update contributors_list.
+ * Refresh patches, drop patches no longer needed.
+ * d/watch: Ignore RC versions.
+ * Docs: Add missing mathjax dependency.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 11 Jan 2022 21:25:17 +0000
+
+pandas (1.3.4+dfsg-7) unstable; urgency=medium
+
+ * Team upload.
+ * Disable more numba tests on 32bit archs
+
+ -- Jochen Sprickerhof <jspricke@debian.org> Thu, 02 Dec 2021 17:32:54 +0100
+
+pandas (1.3.4+dfsg-6) unstable; urgency=medium
+
+ * Team upload.
+ * Fix another import in the test patch
+ * Add closes bug to old changelog
+
+ -- Jochen Sprickerhof <jspricke@debian.org> Wed, 01 Dec 2021 10:36:56 +0100
+
+pandas (1.3.4+dfsg-5) unstable; urgency=medium
+
+ * Team upload.
+ * Fix missing import
+ * Remove unused salsa-ci.yml
+
+ -- Jochen Sprickerhof <jspricke@debian.org> Tue, 30 Nov 2021 23:39:49 +0100
+
+pandas (1.3.4+dfsg-4) unstable; urgency=medium
+
+ * Tests: remove another 64 bit assumption.
+ * Warn the user and ignore all tests on mips*.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 30 Nov 2021 10:11:08 +0000
+
+pandas (1.3.4+dfsg-3) unstable; urgency=medium
+
+ * Tests: remove some more 64 bit or x86-or-arm64 assumptions.
+ * Docs: add missing MathJax.js symlink, remove unused URL replacement.
+ * Add transition Breaks (see #999415).
+ * Upload to unstable. (Closes: #999415)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 29 Nov 2021 21:59:11 +0000
+
+pandas (1.3.4+dfsg-2) experimental; urgency=medium
+
+ * Stop ignoring build-time tests.
+ * Tests: don't assume 64 bit or x86-or-arm64.
+ * Fix #877754 warning.
+ * Xfail more numba tests on big-endian systems.
+ * Skip test_statsmodels during build to break dependency cycle.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 25 Nov 2021 22:04:47 +0000
+
+pandas (1.3.4+dfsg-1) experimental; urgency=medium
+
+ [ Andreas Tille ]
+ * Fix watchfile to detect new versions on github
+ * New upstream version (Closes: #1000422)
+ * Standards-Version: 4.6.0 (routine-update)
+ * Respect DEB_BUILD_OPTIONS in override_dh_auto_test target (routine-
+ update)
+ * Remove trailing whitespace in debian/copyright (routine-update)
+ * Add salsa-ci file (routine-update)
+ * Set upstream metadata fields: Bug-Submit, Repository, Repository-Browse,
+ Security-Contact.
+ * Remove obsolete field Name from debian/upstream/metadata (already present in
+ machine-readable debian/copyright).
+ * Apply multi-arch hints.
+ + python-pandas-doc: Add Multi-Arch: foreign.
+ * Remove hidden files and directories that are confusing gbp
+ * drop tag definitions from debian/gbp.conf
+
+ [ Rebecca N. Palmer ]
+ * Upload to experimental (see #999415).
+ * Update contributors_list and d/copyright.
+ * Refresh patches, remove no longer needed patches.
+ * Update minimum dependency versions.
+ * Re-enable xlrd and numba tests (#976620 and #972246 have been fixed).
+ * Temporarily disable failing style.ipynb build.
+ * Tests: accept changed error messages, add new dependency,
+ skip/xfail tests that our test setup breaks, clean up afterwards.
+ * Temporarily ignore build-time tests to get a first build.
+ * Be compatible with matplotlib 3.5.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 21 Nov 2021 21:04:26 +0000
+
+pandas (1.1.5+dfsg-2) unstable; urgency=medium
+
+ * Remove dead URL from tests/examples. (Closes: #979621)
+ * Mark autopkgtest needs-internet.
+ * Revert "Print uname etc during build".
+ * Mark matplotlib nocheck/nodoc and allow building on ports without
+ matplotlib or numexpr. (Closes: #977470)
+ * Add python3-tabulate build/test-depends.
+ * Add bottleneck and numexpr test-depends.
+ * Tests: don't require warnings that jedi no longer produces.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 12 Jan 2021 21:06:04 +0000
+
+pandas (1.1.5+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Refresh patches, contributors_list.
+ * Default to openpyxl not xlrd in read_excel, and skip xlrd tests,
+ as xlrd fails if defusedxml is installed (#976620).
+ * Skip numba tests, and not other rolling.apply tests, on s390x.
+ (LP: #1901860)
+ * Tests: on 32 bit systems, avoid time input that overflows.
+ * Print uname etc during build (test for #973854).
+ * Bump Standards-Version to 4.5.1 (no changes needed).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 07 Dec 2020 23:06:28 +0000
+
+pandas (1.1.4+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Refresh patches, contributors_list.
+ * Remove numba test-depends (skips tests). (Closes: #973589)
+ * Loosen pandas-lib->pandas Depends versioning. (Closes: #973289)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 04 Nov 2020 18:36:11 +0000
+
+pandas (1.1.3+dfsg-2) unstable; urgency=medium
+
+ * Tests: re-xfail an intermittent instance of #877419.
+ * Remove no longer needed test_statsmodels xfail.
+ * Fix invalid test skips.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 22 Oct 2020 07:14:29 +0100
+
+pandas (1.1.3+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Refresh patches, contributors_list.
+ * Remove outdated README.source,
+ add contributors_list update process.
+ * Fix invalid test xfail.
+ * Re-add mistakenly removed non-x86 test xfails.
+ * Declare transition Breaks (see #969650).
+ * Skip another test if multiprocessing is not available.
+ * Update cython3 Depends.
+ * Fix pytables expression bug with Python 3.9. (Closes: #972015)
+ * Allow test_statsmodels to fail on 3.9 to break bootstrap cycle.
+ * Upload to unstable. (Closes: #969650)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 18 Oct 2020 16:22:36 +0100
+
+pandas (1.1.1+dfsg-3) experimental; urgency=medium
+
+ * Remove no longer needed test xfails.
+ * Xfail some more non-x86 numba tests and a new instance of #877419.
+ * Skip test_register_entrypoint during build.
+ * Tests: don't assume little-endian.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 03 Sep 2020 08:01:24 +0100
+
+pandas (1.1.1+dfsg-2) experimental; urgency=medium
+
+ * Unconditionally build-depend on sphinx-common (for dh_sphinxdoc).
+ * Re-enable but ignore another potentially crashing non-x86 test.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 01 Sep 2020 08:17:32 +0100
+
+pandas (1.1.1+dfsg-1) experimental; urgency=medium
+
+ * New upstream release.
+ * Upload to experimental.
+ * Drop/refresh patches. Update d/copyright and contributors_list.
+ * Re-enable asyncio tests.
+ * Skip fsspec tests while it is too old a version.
+ * Fix plot test cleanup (upstream bug 35080).
+ * Skip test that is expected to fail in our setup.
+ * Update minimum dependency versions.
+ * Use dh_sphinxdoc.
+ * Re-enable but ignore potentially crashing non-x86 tests.
+ * Simplify d/rules, mostly by using pybuild more.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 31 Aug 2020 18:44:39 +0100
+
+pandas (1.0.5+dfsg-3) unstable; urgency=medium
+
+ * Remove pytest-asyncio test-depends.
+ * Remove numba test-depends on non-x86: at least s390x crashes.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 26 Aug 2020 22:34:50 +0100
+
+pandas (1.0.5+dfsg-2) unstable; urgency=medium
+
+ * Fix missing import and update numba submodule name in patches.
+ * Disable asyncio tests (workaround for #969050).
+ * Warn that numba may give wrong answers on non-x86,
+ and remove test-depends on mipsel.
+ * Skip a crashing test on s390x.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 26 Aug 2020 20:19:16 +0100
+
+pandas (1.0.5+dfsg-1) unstable; urgency=medium
+
+ * Upstream bugfix release. Refresh patches, contributors_list.
+ * Fix invalid test xfails.
+ * Only Recommend numba on amd64, to reduce the risk of bugs.
+ * Don't test-depend on numba on ppc64el (where it crashes, #863511?)
+ or on ports architectures (where it mostly isn't available).
+ * Remove no longer needed test xfails/skips.
+ * Upload to unstable. (Closes: #950430)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 25 Aug 2020 20:07:50 +0100
+
+pandas (0.25.3+dfsg2-5) unstable; urgency=medium
+
+ * Tests: ignore rounding difference on i386. (Closes: #968208)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 16 Aug 2020 20:09:14 +0100
+
+pandas (0.25.3+dfsg2-4) unstable; urgency=medium
+
+ * Be compatible with matplotlib 3.3. (Closes: #966393)
+ * Docs: fix broken remote->local Javascript replacement.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 09 Aug 2020 22:11:25 +0100
+
+pandas (0.25.3+dfsg2-3) unstable; urgency=medium
+
+ * Nested DataFrames may raise ValueError with numpy 1.19
+ (upstream bug 32289). Clarify error message and xfail tests.
+ * Stop using a no-longer-existing numpy constant.
+ * Tests: ignore deprecations/rewordings and avoid setup exception
+ with numpy 1.19. (Together, the above Closes: #963817)
+ * Bump debhelper compat to 13.
+ * Fix HDFStore.flush (part of #877419) on s390x.
+ * Add NEWS.html.gz for Standards-Version 4.5.0.
+ (Choosing not to also add NEWS.gz as it would be large.)
+ * Tests: accept Hurd's errno and lack of multiprocessing.
+ * Docs: replace embedded Javascript copies with links.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 28 Jun 2020 21:47:22 +0100
+
+pandas (1.0.4+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release. (Closes: #962335)
+ * Refresh patches, update contributors_list.
+ * Fix broken tests.
+ * Tests: allow numba to raise an error on 32 bit systems.
+ * Don't test-depend on numba on armel (where it crashes,
+ possibly #863508) or ppc64/riscv64 (where it isn't available).
+ * Xfail some more HDF5 tests on big-endian architectures.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 09 Jun 2020 22:19:23 +0100
+
+pandas (0.25.3+dfsg2-2) unstable; urgency=medium
+
+ * Tests: don't fail on jedi deprecation warnings.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 07 May 2020 11:57:06 +0100
+
+pandas (1.0.3+dfsg2-1) experimental; urgency=medium
+
+ * Upstream bugfix release, update contributors_list.
+ * Merge packaging from unstable,
+ but omit no longer needed clipboard warn/xfail.
+ * Only show the NaN -> datetime warning from float dtypes
+ (to avoid an exception while trying to check).
+ * Recommend numba, as we now have a recent enough version.
+ * Re-add dask test-dependency.
+ * Clarify non-x86 warnings, remove no longer needed xfails / ignores.
+ * Clean up whitespace and patch names/descriptions.
+ * Remove patches no longer needed.
+ * Network tests: use more stable URLs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 06 May 2020 17:07:44 +0100
+
+pandas (0.25.3+dfsg2-1) unstable; urgency=medium
+
+ * Remove inconveniently licensed (CC-BY-SA) snippets.
+ * Fix (harmless) SyntaxWarning on install. (Closes: #956021)
+ * Fix NaT sort order and test failures with numpy 1.18.
+ (Closes: #958531)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 06 May 2020 12:18:23 +0100
+
+pandas (0.25.3+dfsg-9) unstable; urgency=medium
+
+ * Don't raise on import without matplotlib installed. Add test of this.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 03 Apr 2020 21:56:02 +0100
+
+pandas (0.25.3+dfsg-8) unstable; urgency=medium
+
+ * Tests: don't fail on harmless changes in dependencies. (Closes: #954647)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 02 Apr 2020 18:53:32 +0100
+
+pandas (0.25.3+dfsg-7) unstable; urgency=medium
+
+ * Fix another test failure due to our warnings.
+ * Skip rather than xfail crashing tests.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 26 Feb 2020 18:45:58 +0000
+
+pandas (0.25.3+dfsg-6) unstable; urgency=medium
+
+ * Don't fail tests on our own warnings.
+ * Xfail some more HDF tests on non-x86 architectures.
+ * Warn that clipboard I/O is broken on big-endian architectures
+ and xfail test.
+ * Use pytest-forked to isolate (already xfailed) crashing test.
+ * Xfail tests that use no-longer-existing URLs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 26 Feb 2020 07:40:25 +0000
+
+pandas (0.25.3+dfsg-5) unstable; urgency=medium
+
+ * Backport packaging from experimental:
+ - Remove unnecessary test skips, and reorganize remaining ones.
+ - Use xfails instead of skips.
+ - Add warnings for the known non-x86 breakages
+ (NaN -> datetime #877754, HDF and Stata I/O #877419).
+ - Tell I/O tests where to find the source tree's test data
+ instead of skipping them.
+ - Stop using deprecated envvar/tag names.
+ - Use https for links where available.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 24 Feb 2020 22:38:26 +0000
+
+pandas (1.0.1+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release.
+ * Refresh patches.
+ * Update and sort d/copyright, update contributors_list.
+ * Re-enable checking the test suite.
+ * Declare transition Breaks (see #950430).
+ * Add jinja2 recommends/test-depends.
+ * Fix test_to_numpy failure on big-endian systems.
+ * Register documentation in doc-base. (Closes: #879226)
+ * Remove no longer needed test xfails/skips,
+ and reorganize the remaining ones.
+ * Tell I/O tests where to find the source tree's test data
+ instead of skipping them.
+ * Enable multiarch.
+ * Temporarily drop dask test-dependency to avoid uninstallability.
+ * Add warnings for the known non-x86 breakages
+ (NaN -> datetime #877754, HDF and Stata I/O #877419).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 23 Feb 2020 17:13:08 +0000
+
+pandas (1.0.0+dfsg-1) experimental; urgency=medium
+
+ * New upstream release.
+ * Upload to experimental, as this is an API break (see #950430).
+ * Drop patches applied upstream, refresh others.
+ * Update and improve d/copyright, update contributors_list.
+ * Xfail a test that fails in the C locale.
+ * Update and organize depends/recommends.
+ * Docs: use a sphinx theme we have, fix spelling,
+ link to rather than embed remote resource,
+ use https links where available.
+ * Stop using deprecated envvar/tag names.
+ * Xfail rather than skip previously broken tests,
+ and put the condition in the patch not d/rules or d/tests.
+ * Remove no longer used patch-stamp.
+ * Temporarily ignore the test suite to get a first build.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 02 Feb 2020 21:04:36 +0000
+
+pandas (0.25.3+dfsg-4) unstable; urgency=medium
+
+ * No-change upload to unstable. (Closes: #937236, #931557)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 10 Nov 2019 16:35:41 +0000
+
+pandas (0.25.3+dfsg-3) experimental; urgency=medium
+
+ * Fix autopkgtest.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 09 Nov 2019 10:29:47 +0000
+
+pandas (0.25.3+dfsg-2) experimental; urgency=medium
+
+ * Split up the test suite to fit in memory on mipsel,
+ and stop ignoring it there. (Closes: #943732)
+ * Reproducibility: use correct path for stripping docs.
+ * Declare transition Breaks (see #931557).
+ * Tests: ignore warning from Python 3.8.
+ * Update d/copyright (some files have moved).
+ * Use local requirejs.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 08 Nov 2019 07:56:16 +0000
+
+pandas (0.25.3+dfsg-1) experimental; urgency=medium
+
+ * Upstream bugfix release.
+ * Drop patch no longer needed.
+ * Update autopkgtest dependencies, drop unused link.
+ * Better document test skips, remove unnecessary ones.
+ * Reproducibility: strip timestamps and build paths,
+ use fixed random seeds for building documentation.
+ * Ignore test suite on mipsel.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 02 Nov 2019 22:26:31 +0000
+
+pandas (0.25.2+dfsg-2) experimental; urgency=medium
+
+ * Correct path for contributors list, and don't fail when
+ not building the -doc package.
+ * Try again to fix test failure due to deb_nonversioneer_version.
+ * Skip some failing tests on non-Intel (see #943732),
+ require other tests to pass.
+ * Fix another typo.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 28 Oct 2019 22:06:10 +0000
+
+pandas (0.25.2+dfsg-1) experimental; urgency=medium
+
+ [ Graham Inggs ]
+ * Skip python2 test_register_by_default on s390x
+ * Fix python2 test failures in certain locales
+
+ [ Yaroslav Halchenko ]
+ * Recent upstream release
+ * Updated patches
+ * Adjusted for the gone ci/print_versions
+ * d/control
+ - added python{,3}-hypothesis to b-depends
+
+ [ Rebecca N. Palmer ]
+ * New upstream release.
+ * Upload to experimental, as this is an API break (see #931557).
+ * Drop patches fixed upstream, refresh others.
+ * Remove Python 2 packages (see #937236).
+ * Use Python 3 in shebangs and subprocess calls.
+ * Re-enable building on Python 3.8.
+ * Use the new location of print_versions.
+ * Skip feather tests and remove build-dependency:
+ they now need pyarrow.feather, which isn't in Debian.
+ * Don't fail tests for our versioneer removal
+ or a differently worded error message.
+ * Add/update minimum dependency versions.
+ * Add numpydoc, nbconvert and pytest-xdist build-depends.
+ * Update d/copyright.
+ * Pre-generate a contributor list to avoid needing the git log
+ at build time (when it won't exist).
+ * Allow tests to fail for now.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 28 Oct 2019 07:53:21 +0000
+
+pandas (0.23.3+dfsg-8) unstable; urgency=medium
+
+ * Examples dependencies: re-add statsmodels and xarray;
+ also add rpy2 and feather.
+ * Use packaged intersphinx indexes. (Closes: #876417)
+ * Use https for intersphinx links.
+ * Remove cythonized-files*. (They are regenerated on each build.)
+ * Remove test xfail, as statsmodels has now been fixed.
+ * Set Rules-Requires-Root: no.
+ * Make documentation Suggest the Python 3 version.
+ * Suggest statsmodels.
+ * Only use Python 3 sphinx, and mark it -Indep/nodoc.
+ * Bump debhelper compat to 12 and use debhelper-compat and pybuild.
+ * Remove pycompat and X-Python*-Version.
+ * Add missing d/copyright item.
+ * Remove obsolete TODOs.
+ * Clarify descriptions.
+ * Stop referring to examples that no longer exist.
+ * Fix typos.
+ * Remove old (no longer used) EXCLUDE_TESTS*.
+ * Deduplicate documentation files.
+ * Use Python 3 shebangs, and fix broken shebang.
+ * Add python3-ipykernel, -ipywidgets, -seaborn to
+ Build-Depends-Indep.
+ * Disable dh_auto_test: it fails, and we run the tests elsewhere.
+ * Mark test dependencies nocheck/nodoc.
+ * Remove old minimum versions / alternative dependencies.
+ * Build-depend on dh-python.
+ * Don't build on python3.8, as it will fail tests (see #931557).
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 27 Oct 2019 11:38:37 +0000
+
+pandas (0.23.3+dfsg-7) unstable; urgency=medium
+
+ * Revert test patch and use an xfail instead.
+ * Temporarily drop statsmodels+xarray Build-Depends, as they are
+ uninstallable until this is built.
+ * Add python3-xarray to autopkgtest Depends.
+ * Drop Python 2 autopkgtest (but keep build-time test).
+ * Remove duplicate Recommends.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 20 Sep 2019 08:01:37 +0100
+
+pandas (0.23.3+dfsg-6) unstable; urgency=medium
+
+ * Team upload
+ * Avoid FTBFS with statsmodels 0.9.0
+ * Add python3-statsmodels to autopkgtest Depends
+
+ -- Graham Inggs <ginggs@debian.org> Wed, 18 Sep 2019 13:46:01 +0000
+
+pandas (0.23.3+dfsg-5) unstable; urgency=medium
+
+ * Team upload
+ * Add locales-all to Build-Depends and autopkgtest Depends in order to
+ consistently test in all available locales
+ * Add crh_UA to skip_noencoding_locales.patch
+ * Fix wrong debian/source/options exclude, thanks Steve Langasek
+
+ -- Graham Inggs <ginggs@debian.org> Wed, 18 Sep 2019 05:57:44 +0000
+
+pandas (0.23.3+dfsg-4) unstable; urgency=medium
+
+ * Add self to Uploaders.
+ * Recommend .xls format support also in Python 3. (Closes: #880125)
+ * Tests: don't call fixtures, as this is an error in pytest 4+.
+ * Don't test datetime in locales with no encoding.
+ (These are broken by a Python stdlib bug.)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 14 Sep 2019 16:37:43 +0100
+
+pandas (0.23.3+dfsg-3) unstable; urgency=medium
+
+ * Team upload.
+ * Make np.array @ Series act the right way round. (Closes: #923708)
+ * Replace #918206 fix with a fix that doesn't change the return type
+ and inplace-ness of np.array += DataFrame. (Closes: #923707)
+ * Fix missing page in documentation.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Wed, 06 Mar 2019 22:19:34 +0000
+
+pandas (0.23.3+dfsg-2) unstable; urgency=medium
+
+ * Team upload.
+ * Don't fail the build on +dfsg versions.
+ * Fix another d/copyright issue.
+ * Add d/upstream/metadata.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 02 Mar 2019 14:57:12 +0000
+
+pandas (0.23.3+dfsg-1) unstable; urgency=medium
+
+ * Team upload.
+ * Fix DataFrame @ np.array matrix multiplication. (Closes: #918206)
+ * Fix documentation build (Sphinx now defaults to Python 3).
+ (Closes: #804552, LP: #1803018)
+ * Add documentation examples dependencies.
+ * Update d/copyright.
+ * Remove unlicensed files.
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 01 Mar 2019 23:02:18 +0000
+
+pandas (0.23.3-1) unstable; urgency=medium
+
+ * New upstream release
+ * debian/patches
+ - many upstreamed patches are removed and others refreshed
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 28 Jul 2018 00:39:32 -0400
+
+pandas (0.22.0-8) unstable; urgency=medium
+
+ * Team Upload.
+ * patches:
+ + Add patch: deb_dont_call_py2_in_py3_test.patch
+ During python3 unit test, command 'python' is called by one of
+ the tests. When there is no python2 installation, tests such as
+ autopkgtest would fail.
+ * Put the conditionally applied patch to series' comment to avoid
+ lintianW: patch-file-present-but-not-mentioned-in-series.
+ * Trying to fix the autopkgtest:
+ + Leave a comment about the way to run unittest in the test control file.
+ + Synchronize B-D and autopkgtest depends.
+ + Allow output to stderr during test.
+ * Switch from nosetest to pytest.
+ * Synchronize pytest argument for rules and autopkgtest.
+ - Replace tests/unittest with the symlink pointed to tests/unittest3.
+ That script is smart enough to tell py2 from py3, so we won't
+ need to write the same thing twice.
+ - Filter out intel tests on non-x86 architectures.
+ - Only enable "slow" tests on (Debian + x86) tester. "slow" tests may
+ consume too much memory to cause memory error or trigger OOM killer.
+ * control:
+ + Add missing python3 dependencies and sort the B-D list.
+ * Point Vcs-* fields to Salsa.
+ * Update Homepage to https://pandas.pydata.org/ .
+ * rules:
+ * Reverse the architecture filtering logic.
+ * Disable "slow" tests during build for non-x86 architectures.
+ This may significantly reduce the build time on those weak architectures.
+ * Don't specify the pytest marker expression twice.
+ The first expression will be overridden.
+ * Fix hardening flags.
+ - Cleanup the mess of unused nosetest exclusion expressions.
+ * Update lintian overrides.
+ + Override source-is-missing error, which is a false-positive triggered
+ by insane-line-length-in-source-file.
+ + Override insane-line-length-in-source-file because we have nothing
+ to do with lengthy lines in html.
+ * TODO: Point out that the unittest speed can be boosted with pytest-xdist.
+
+ -- Mo Zhou <cdluminate@gmail.com> Sun, 17 Jun 2018 16:01:16 +0000
+
+pandas (0.22.0-7) unstable; urgency=medium
+
+ * Team Upload.
+
+ [ Mo Zhou ]
+ * Remove patch: deb_fix_test_failure_test_basic_indexing, which is
+ unneeded for pandas >= 0.21 . (Closes: #900061)
+
+ [ Graham Inggs ]
+ * Add riscv64 to the list of "not intel" architectures
+ * Update mark_tests_working_on_intel_armhf.patch
+
+ -- Graham Inggs <ginggs@debian.org> Tue, 29 May 2018 13:50:59 +0000
+
+pandas (0.22.0-6) unstable; urgency=medium
+
+ * Team upload
+ * Fix FTBFS with Sphinx 1.7, thanks Dmitry Shachnev!
+
+ -- Graham Inggs <ginggs@debian.org> Tue, 24 Apr 2018 19:09:20 +0000
+
+pandas (0.22.0-5) unstable; urgency=medium
+
+ * Team upload
+ * Add compatibility with Matplotlib 2.2 (Closes: #896673)
+
+ -- Graham Inggs <ginggs@debian.org> Mon, 23 Apr 2018 13:56:12 +0000
+
+pandas (0.22.0-4) unstable; urgency=medium
+
+ * Team upload
+ * Fix more tests expecting little-endian results
+ * Fix heap corruption in read_csv on 32-bit, big-endian architectures
+ (Closes: #895890)
+
+ -- Graham Inggs <ginggs@debian.org> Sun, 22 Apr 2018 21:48:27 +0000
+
+pandas (0.22.0-3) unstable; urgency=medium
+
+ * Team upload
+ * Refresh and re-enable mark_tests_working_on_intel.patch
+ * Fix test__get_dtype tests expecting little-endian results
+
+ -- Graham Inggs <ginggs@debian.org> Thu, 12 Apr 2018 11:04:21 +0000
+
+pandas (0.22.0-2) unstable; urgency=medium
+
+ * debian/patches
+ - as upstream moved over to pytest from nose, no more nose imports were
+ in the code. Just adjusted patches to import nose where needed
+ * debian/rules
+ - specify LC_ALL=C locale to avoid crash while building docs
+ - add the 0001-TST-pytest-deprecation-warnings-GH17197-17253-reversed.patch
+ to the series if building on a system with an old pytest
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 21 Feb 2018 23:44:58 -0500
+
+pandas (0.22.0-1) unstable; urgency=medium
+
+ * Upstream release
+ * debian/patches
+ - refreshed many
+ - updated some
+ - added
+ - up_moto_optional to skip tests requiring moto (#777089)
+ - deb_skip_difffailingtests to skip two failing tests
+ (see https://github.com/pandas-dev/pandas/issues/19774)
+ - up_xlwt_optional to skip a test requiring xlwt
+ - deb_ndsphinx_optional to make nbsphinx optional.
+ Make nbsphinx not required in build-depends on systems with
+ older python-sphinx
+ - mark_tests_failing_on_386.patch
+ see https://github.com/pandas-dev/pandas/issues/19814
+ - removed adopted upstream:
+ - dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch
+ - up_tst_np_argsort_comparison2
+ - disabled for now:
+ - mark_tests_working_on_intel.patch
+ - up_tst_dont_assert_that_a_bug_exists_in_numpy
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 21 Feb 2018 10:30:06 -0500
+
+pandas (0.20.3-11) unstable; urgency=medium
+
+ * Team upload.
+ * Cherry-pick upstream commit 5f2b96bb637f6ddeec169c5ef8ad20013a03c853
+ to workaround a numpy bug. (Closes: #884294)
+ + patches/up_tst_dont_assert_that_a_bug_exists_in_numpy
+ * Cherry-pick upstream commits to fix test failure caused by test_argsort().
+ + patches/up_tst_np_argsort_comparison2
+ * Workaround test failure of test_basic_indexing() in file
+ pandas/tests/series/test_indexing.py .
+ + patches/deb_fix_test_failure_test_basic_indexing
+
+ -- Mo Zhou <cdluminate@gmail.com> Sat, 20 Jan 2018 09:00:31 +0000
+
+pandas (0.20.3-10) unstable; urgency=medium
+
+ * Team upload.
+ * Exclude more tests failing on mips, armhf and powerpc
+
+ -- Andreas Tille <tille@debian.org> Tue, 24 Oct 2017 21:26:02 +0200
+
+pandas (0.20.3-9) unstable; urgency=medium
+
+ * Team upload.
+ * Add missing "import pytest" to two patched tests
+ * Secure URI in watch file
+
+ -- Andreas Tille <tille@debian.org> Tue, 24 Oct 2017 08:18:54 +0200
+
+pandas (0.20.3-8) unstable; urgency=medium
+
+ * Team upload.
+ * Exclude one more test and de-activate non-working ignore of test errors
+
+ -- Andreas Tille <tille@debian.org> Mon, 23 Oct 2017 21:32:24 +0200
+
+pandas (0.20.3-7) unstable; urgency=medium
+
+ * Team upload.
+ * debhelper 9
+ * Use Debian packaged mathjax
+ * Do not Recommends python3-six since it is mentioned in Depends
+ * Remove redundant/outdated XS-Testsuite: autopkgtest
+ * Exclude one more test and de-activate non-working ignore of test errors
+
+ -- Andreas Tille <tille@debian.org> Mon, 23 Oct 2017 17:33:55 +0200
+
+pandas (0.20.3-6) unstable; urgency=medium
+
+ * Team upload.
+ * Ignore test errors on some architectures
+ (Concerns bug #877419)
+ * Remove __pycache__ remnants from testing
+ * Standards-Version: 4.1.1
+ * DEP3 for Google Analytics patch
+ * Complete Google Analytics patch
+
+ -- Andreas Tille <tille@debian.org> Mon, 23 Oct 2017 09:05:27 +0200
+
+pandas (0.20.3-5) unstable; urgency=medium
+
+ * Make sure remnants of nose tests will not fail. That's a pretty stupid
+ patch since the tests are not using nose any more, only some remaining
+ exceptions. Hope it will work anyway.
+ (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org> Mon, 16 Oct 2017 21:57:45 +0200
+
+pandas (0.20.3-4) unstable; urgency=medium
+
+ * Mark those tests @pytest.mark.intel that pass only on Intel architectures
+ * d/rules: try to exclude tests that were marked "intel"
+ (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org> Sat, 14 Oct 2017 19:49:01 +0200
+
+pandas (0.20.3-3) unstable; urgency=medium
+
+ * Team upload.
+ * Moved packaging from pkg-exppsy to Debian Science
+ * Exclude certain tests on certain architectures
+ (Concerns bug #877419)
+
+ -- Andreas Tille <tille@debian.org> Fri, 13 Oct 2017 20:52:53 +0200
+
+pandas (0.20.3-2) unstable; urgency=medium
+
+ * debian/control
+ - boosted policy to 4.0.0 (I think we should be ok)
+ - drop statsmodels from build-depends to altogether avoid the circular
+ build-depends (Closes: #875805)
+ * Diane Trout:
+ - Add dateutil-2.6.1-fixed-ambiguous-tz-dst-be.patch (Closes: #875807)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 21 Sep 2017 16:11:29 -0400
+
+pandas (0.20.3-1) unstable; urgency=medium
+
+ * Fresh upstream release
+ * debian/patches
+ - updated some, removed changeset*, and disabled possibly fixed upstream
+ ones
+ * debian/{control,rules}
+ - upstream switched to use pytest instead of nose
+ - enabled back all the tests for now
+ - added python-nbsphinx for b-depends, needed for docs
+ * debian/*.install
+ - no .so at the first level of subdirectories, now present on the third
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 10 Jul 2017 20:00:59 -0400
+
+pandas (0.19.2-5.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Apply patch by Rebecca N. Palmer
+ Closes: #858260
+
+ -- Andreas Tille <tille@debian.org> Sun, 02 Apr 2017 07:06:36 +0200
+
+pandas (0.19.2-5) unstable; urgency=medium
+
+ * And one more test to skip on non-amd64 -- test_round_trip_valid_encodings
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 12 Jan 2017 13:10:11 -0500
+
+pandas (0.19.2-4) unstable; urgency=medium
+
+ * Exclude few more "plotting" tests on non-amd64 which cause FTBFS
+ on s390
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 12 Jan 2017 11:43:13 -0500
+
+pandas (0.19.2-3) unstable; urgency=medium
+
+ * Brought back changeset_0699c89882133a41c250abdac02796fec84512e8.diff
+ which should resolve tests failures on BE platforms (wasn't yet
+ upstreamed within 0.19.x releases)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 12 Jan 2017 09:44:52 -0500
+
+pandas (0.19.2-2) unstable; urgency=medium
+
+ * Exclude a number of tests while running on non-amd64 platforms
+ due to bugs in numpy/pandas
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 11 Jan 2017 12:13:05 -0500
+
+pandas (0.19.2-1) unstable; urgency=medium
+
+ * Fresh upstream minor release -- supposed to be bugfix but interacts
+ with current beta (1:1.12.0~b1-1) numpy leading to various failed tests
+ * debian/patches
+ - changeset_ae6a0a51cf41223394b7ef1038c210045d486cc8.diff
+ to guarantee the same Series dtype as of cut regardless of architecture
+ - up_buggy_overflows
+ workaround for inconsistent overflows while doing pow operation on big
+ ints
+ * debian/rules
+ - exclude more tests which are due to known issues in numpy beta and thus
+ not to be addressed directly in pandas
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 04 Jan 2017 10:19:52 -0500
+
+pandas (0.19.1+git174-g81a2f79-1) experimental; urgency=medium
+
+ * New upstream snapshot from v0.19.0-174-g81a2f79
+ - lots of bugfixes since 0.19.1, so decided to test snapshot
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 10 Dec 2016 22:43:19 -0500
+
+pandas (0.19.1-3) unstable; urgency=medium
+
+ * Require cython >= 0.23 or otherwise use pre-cythoned sources
+ (should resolve https://github.com/pandas-dev/pandas/issues/14699
+ on jessie)
+ * debian/control
+ - Build-Conflicts with python-tables 3.3.0-4 since that one leads to FTBFS
+ - boosted policy to 3.9.8
+ * debian/rules
+ - Exclude few more tests which fail on big endian and other platforms
+ test_(msgpack|read_dta18)
+ * debian/patches
+ - changeset_0699c89882133a41c250abdac02796fec84512e8.diff
+ to compare in the tests against native endianness
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 09 Dec 2016 15:49:50 -0500
+
+pandas (0.19.1-2) unstable; urgency=medium
+
+ * debian/control
+ - Moved statsmodels build-depend (optional) under build-depends-indep
+ to break circular dependency. Thanks Stuart Prescott for the analysis
+ * debian/patches/
+ - changeset_1309346c08945cd4764a549ec63cf51089634a45.diff
+ to not mask problem reading json leading to use of undefined variable
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 27 Nov 2016 21:49:40 -0500
+
+pandas (0.19.1-1) unstable; urgency=medium
+
+ * Fresh upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 18 Nov 2016 12:19:54 -0500
+
+pandas (0.19.0+git14-ga40e185-1) unstable; urgency=medium
+
+ * New upstream post-release (includes some bugfixes) snapshot
+ * debian/patches
+ - dropped changeset_ and up_ patches adopted upstream, refreshed the rest
+ * debian/rules,patches
+ - save debian-based version into __version.py, so doesn't conflict with
+ upstream tests of public API
+ - exclude for now test_expressions on python3
+ (see https://github.com/pydata/pandas/issues/14269)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 13 Oct 2016 10:26:18 -0400
+
+pandas (0.18.1-1) unstable; urgency=medium
+
+ * Fresh upstream release
+ * debian/patches/
+ - changeset_46af7cf0f8e0477f6cc7454aa786a573228f0ac3.diff
+ to allow also AttributeError exception being thrown in the tests
+ (Closes: #827938)
+ - debian/patches/deb_skip_test_precision_i386
+ removed (upstreamed)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 13 Jul 2016 10:42:00 -0400
+
+pandas (0.18.0+git114-g6c692ae-1) unstable; urgency=medium
+
+ * debian/control
+ - added python{,3}-pkg-resources to direct Depends for the packages
+ (Closes: #821076)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 17 Apr 2016 20:49:25 -0400
+
+pandas (0.17.1-3) unstable; urgency=medium
+
+ * debian/tests/unittest*
+ - set LC_ALL=C.UTF-8 for the tests run to prevent failure of test_set_locale
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 08 Dec 2015 08:31:30 -0500
+
+pandas (0.17.1-2) unstable; urgency=medium
+
+ * debian/control
+ - make -statsmodels and -tables optional build-depends on those platforms
+ where they are N/A atm. Added bdepends on python3-tables since available
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 06 Dec 2015 12:58:26 -0500
+
+pandas (0.17.1-1) unstable; urgency=medium
+
+ * Fresh upstream bugfix release
+ * debian/rules
+ - fixed deletion of moved away .so files
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 27 Nov 2015 10:52:49 -0500
+
+pandas (0.17.0+git8-gcac4ad2-2) unstable; urgency=medium
+
+ * Bug fix: install also msgpack/*.so extensions to -lib packages
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 10 Oct 2015 13:52:54 -0400
+
+pandas (0.17.0+git8-gcac4ad2-1) unstable; urgency=medium
+
+ * New upstream snapshot post release to pick up few bugfixes
+ - Started to trigger failures of test_constructor_compound_dtypes and
+ test_invalid_index_types -- disabled those for now, see
+ https://github.com/pydata/pandas/issues/11169
+ * debian/rules
+ - Generate pandas/version.py if not present out of debian/changelog
+ upstream version information (versioneer wouldn't know since relies on
+ git)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 09 Oct 2015 21:35:23 -0400
+
+pandas (0.16.2+git65-g054821d-1) unstable; urgency=medium
+
+ * Fresh upstream post-release snapshot (to pick up recent fixes etc)
+ (Closes: #787432)
+ * debian/{control,rules}
+ - build -doc package (Closes: #660900)
+ - add ipython (or alternative new ones from neurodebian) into
+ Build-Depends-Indep to build docs
+ - add python{,3}-{lxml,html5lib} to Build-Depends and Recommends
+ - use LC_ALL=C.UTF-8 while running tests
+ - exclude also test_set_locale since it fails ATM
+ see https://github.com/pydata/pandas/issues/10471
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 30 Jun 2015 17:26:54 -0400
+
+pandas (0.16.0~rc1-1) experimental; urgency=medium
+
+ * New upstream release candidate
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 13 Mar 2015 14:21:39 -0400
+
+pandas (0.15.2-1) unstable; urgency=medium
+
+ * Fresh upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 11 Dec 2014 09:51:57 -0500
+
+pandas (0.15.1+git125-ge463818-1) unstable; urgency=medium
+
+ * New upstream snapshot from v0.15.1-125-ge463818.
+ * Upload to unstable during freeze since previous one in sid didn't make it
+ to jessie anyways
+ * debian/control
+ - remove versioning demand for cython (it would use pre-cythonized code on
+ older ones and there is no longer need in sid/jessie to enforce version).
+ As a consecuence -- removed all dsc patches pointing to
+ nocython3-dsc-patch, since no longer needed
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 30 Nov 2014 21:09:36 -0500
+
+pandas (0.15.0-2) unstable; urgency=medium
+
+ * debian/control
+ - specify minimal numpy to be 1.7
+ * debian/patches
+ - deb_skip_stata_on_bigendians skip test_stata again on BE platforms
+ - deb_skip_test_precision_i386 skip test_precision_conversion on 32bit
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 30 Oct 2014 23:09:13 -0400
+
+pandas (0.15.0-1) unstable; urgency=medium
+
+ * New upstream release
+ * debian/control
+ - restrict statsmodels and matplotlib from being required on the ports
+ which do not have them
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 26 Oct 2014 11:30:23 -0400
+
+pandas (0.14.1-2) unstable; urgency=medium
+
+ * debian/patches/changeset_314012d.diff
+ - Fix converter test for MPL1.4 (Closes: #763709)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 06 Oct 2014 11:53:42 -0400
+
+pandas (0.14.1-1) unstable; urgency=medium
+
+ * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 10 Jul 2014 23:38:49 -0400
+
+pandas (0.14.0+git393-g959e3e4-1) UNRELEASED; urgency=medium
+
+ * New upstream snapshot from v0.14.0-345-g8cd3dd6
+ * debian/rules
+ - disable running disabled tests to prevent clipboard tests failures
+ under kfreebsd kernels
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 07 Jul 2014 12:29:50 -0400
+
+pandas (0.14.0+git213-g741b2fa-1) experimental; urgency=medium
+
+ * New upstream snapshot from v0.14.0-213-g741b2fa.
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 19 Jun 2014 10:30:42 -0400
+
+pandas (0.14.0+git17-g3849d5d-1) unstable; urgency=medium
+
+ * New upstream snapshot from v0.14.0-17-g3849d5d -- has resolved a number
+ of bugs sneaked into 0.14.0 release, and caused FTBFS on some platforms
+ and backports
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 01 Jun 2014 00:54:34 -0400
+
+pandas (0.14.0-1) unstable; urgency=medium
+
+ * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 30 May 2014 08:45:35 -0400
+
+pandas (0.14.0~rc1+git79-g1fa5dd4-1) experimental; urgency=medium
+
+ * New upstream snapshot from v0.14.0rc1-73-g8793356
+ * debian/patches:
+ - dropped CPed changeset_*s
+ - added deb_disable_googleanalytics
+ * debian/control:
+ - boosted policy compliance to 3.9.5
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 27 May 2014 16:00:00 -0400
+
+pandas (0.13.1-2) unstable; urgency=low
+
+ * debian/patches/changeset_6d56e7300d66d3ba76684334bbb44b6cd0ea9f61.diff
+ to fix FTBFS of statsmodels due to failing tests (Closes: #735804)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 08 Feb 2014 12:46:42 -0500
+
+pandas (0.13.1-1) unstable; urgency=low
+
+ * Fresh upstream release
+ * debian/patches
+ - deb_skip_test_pytables_failure to mitigate error while testing on
+ amd64 wheezy and ubuntu 12.04
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 04 Feb 2014 12:09:29 -0500
+
+pandas (0.13.0+git464-g15a8ff7-1) experimental; urgency=low
+
+ * Fresh pre-release snapshot
+ * debian/patches
+ - removed all cherry-picked patches (should have been upstreamed)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 29 Jan 2014 21:27:45 -0500
+
+pandas (0.13.0-2) unstable; urgency=low
+
+ * debian/patches
+ - 0001-BLD-fix-cythonized-msgpack-extension-in-setup.py-GH5.patch
+ to resolve issue with building C++ Cython extension using
+ pre-generated sources
+ - 0001-Add-division-future-import-everywhere.patch
+ 0002-remove-explicit-truediv-kwarg.patch
+ to resolve compatibility issues with elderly Numexpr
+ - 0001-BUG-Yahoo-finance-changed-ichart-url.-Fixed-here.patch
+ - deb_skip_sequencelike_on_armel to prevent FTBFS on armel due to failing
+ test: https://github.com/pydata/pandas/issues/4473
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 03 Jan 2014 23:13:48 -0500
+
+pandas (0.13.0-1) unstable; urgency=low
+
+ * Fresh upstream release
+ - resolved compatibility with matplotlib 1.3 (Closes: #733848)
+ * debian/{control,rules}
+ - use xvfb (added to build-depends together with xauth, and xclip)
+ for tests
+ - define http*_proxy to prevent downloads
+ - install .md files not .rst for docs -- were renamed upstream
+ - include .cpp Cython generated files into debian/cythonized-files*
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 01 Jan 2014 18:08:22 -0500
+
+pandas (0.12.0-2) unstable; urgency=low
+
+ [ Dmitry Shachnev ]
+ * DEP-8 tests improvements:
+ - Use Xvfb for running tests.
+ - Increase verbosity using -v flag.
+ - Fix printing interpreter version in unittests3.
+ * Fix indentation in debian/control.
+
+ [ Yaroslav Halchenko ]
+ * debian/control
+ - place python3-matplotlib ahead of elderly python-matplotlib without
+ python3 support since now we have python3-matplotlib in sid
+ * debian/copyright
+ - go through reported missing copyright/license statements (Closes:
+ #700564) Thanks Luca Falavigna for the report
+ * debian/rules,patches
+ - exclude test test_bar_log due to incompatibility with matplotlib 1.3.0 (test
+ adjusted upstream and would be re-enabled for the new release).
+ - debian/patches/changeset_952c5f0bc433622d21df20ed761ee4cb728370eb.diff
+ adds matplotlib 1.3.0 compatibility
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 14 Sep 2013 20:02:58 -0400
+
+pandas (0.12.0-1) unstable; urgency=low
+
+ * New upstream release:
+ - should address failed tests on 32bit platforms
+ * debian/patches
+ - neurodebian: allow to build for jessie with outdated cython
+ * debian/control
+ - build for Python2 >= 2.7 due to some (probably temporary) incompatibilities
+ in tests with 2.6
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 24 Jul 2013 23:29:03 -0400
+
+pandas (0.12.0~rc1+git127-gec8920a-1) experimental; urgency=low
+
+ * New upstream snapshot from origin/master at v0.12.0rc1-127-gec8920a
+ - should address FTBFS due to failing tests on big endians
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 20 Jul 2013 09:23:04 -0400
+
+pandas (0.12.0~rc1+git112-gb79996c-1) experimental; urgency=low
+
+ * Fresh git snapshot of upstream candidate release. Experimental build
+ to verify functioning across the ports.
+ * debian/control
+ - dedented last "paragraph" to break it away from the 2nd one.
+ Thanks Beatrice Torracca for the detailed report (Closes: #712260)
+ - Depends on python-six now
+ * debian/{,tests/}control
+ - added python{,3}-bs4, python-html5lib to Build-Depends for more
+ thorough testing
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 18 Jul 2013 13:15:19 -0400
+
+pandas (0.11.0-2) unstable; urgency=low
+
+ [ Yaroslav Halchenko ]
+ * Upload to unstable -- this upstream release addressed Cython 0.19
+ compatibility issue (Closes: #710608)
+ * Recommends numexpr
+ * Re-cythonized using Cython 0.19
+
+ [ Dmitry Shachnev ]
+ * debian/tests/unittests3: use nosetests3 instead of nosetests-3.x.
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 03 Jun 2013 11:57:43 -0400
+
+pandas (0.11.0-1) experimental; urgency=low
+
+ * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 23 Apr 2013 22:40:15 -0400
+
+pandas (0.10.1-1) experimental; urgency=low
+
+ * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 22 Jan 2013 13:07:31 -0500
+
+pandas (0.10.0-1) experimental; urgency=low
+
+ * New upstream release
+ - drops python 2.5 support (we are dropping pyversions in favor of
+ X-Python-Version)
+ * debian/patches:
+ - all previous are in upstream now, dropped locally
+ - added -dsc-patch'es for systems without cython3
+ * debian/control:
+ - added python-statsmodels for the extended tests coverage
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 17 Dec 2012 12:27:25 -0500
+
+pandas (0.9.1-2) unstable; urgency=low
+
+ [ Julian Taylor ]
+ * Provide python3 packages
+ * Add autopkgtests
+ * debian/patches:
+ - relax-float-tests.patch:
+ replace float equality tests with almost equal
+ - fix-endian-tests.patch:
+ patch from upstream to fix the test failure on big endian machines
+
+ [ Yaroslav Halchenko ]
+ * Upload to unstable
+ * Dropping pysupport
+ * debian/rules:
+ - slight reduction of code duplication between python 2 and 3
+ - cythonize for both python 2 and 3 into separate directories
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 01 Dec 2012 22:57:47 -0500
+
+pandas (0.9.1-1) experimental; urgency=low
+
+ * New upstream release
+ * Boosted policy to 3.9.3 (no due changes)
+ * debian/rules
+ - Fixed up cleaning up of cythonized files
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 14 Nov 2012 09:44:14 -0500
+
+pandas (0.9.0-1) experimental; urgency=low
+
+ * New upstream release
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 07 Oct 2012 21:26:23 -0400
+
+pandas (0.9.0~rc2-1) experimental; urgency=low
+
+ * New upstream release candidate
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 21 Sep 2012 10:27:52 -0400
+
+pandas (0.8.1-1) unstable; urgency=low
+
+ * Primarily a bugfix upstream release.
+ * up_tag_yahoo_test_requiring_network patch removed.
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 22 Jul 2012 20:13:16 -0400
+
+pandas (0.8.0-2) unstable; urgency=medium
+
+ * up_tag_yahoo_test_requiring_network patch cherry-picked from upstream
+ GIT so that tests would not be exercised at package build time
+ (Closes: #681449)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 13 Jul 2012 08:54:41 -0400
+
+pandas (0.8.0-1) unstable; urgency=low
+
+ * Fresh upstream release
+ * debian/control
+ - drop python-statsmodels from Build-Depends since it might not be yet
+ available on some architectures and is not critical for the test
+ - recommend python-statsmodels instead of deprecated
+ python-scikits.statsmodels
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 29 Jun 2012 13:02:28 -0400
+
+pandas (0.8.0~rc2+git26-g76c6351-1) experimental; urgency=low
+
+ * Fresh upstream release candidate
+ - all patches dropped (upstreamed)
+ - requires numpy >= 1.6
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 12 Jun 2012 13:23:27 -0400
+
+pandas (0.7.3-1) unstable; urgency=low
+
+ * Fresh upstream release
+ - few post-release patches (submitted upstream) to exclude unittests
+ requiring network access
+ * debian/control:
+ - python-openpyxl, python-xlwt, python-xlrd into Build-Depends
+ and Recommends
+ * debian/rules:
+ - exclude running tests marked with @network
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 12 Apr 2012 11:27:31 -0400
+
+pandas (0.7.1+git1-ga2e86c2-1) unstable; urgency=low
+
+ * New upstream release with a bugfix which followed
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 01 Mar 2012 22:28:10 -0500
+
+pandas (0.7.0-1) unstable; urgency=low
+
+ * New upstream release
+ * Updated pre-cythonized .c files for older Debian/Ubuntu releases.
+ Added a stamp file with upstream version to assure up-to-dateness
+ of the generated files
+ * Dropped all exclusions of unittests and patches -- shouldn't be necessary
+ any longer
+ * Build only for requested versions (not all supported) of Python
+ * Do nothing for build operation, rely on overloaded install
+ (to avoid undesired re-cythonization on elderly Ubuntus)
+ * Adjusted url in watch due to migration of repository under pydata
+ organization
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 16 Jan 2012 19:31:50 -0500
+
+pandas (0.6.1-1) UNRELEASED; urgency=low
+
+ * New upstream release
+ * python-tk into Build-Depends
+ * Create matplotlibrc with backend: Agg to allow tests run without $DISPLAY
+ * Carry pre-cythonized .c files for systems with older Cython
+ * Skip few tests known to fail
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 13 Dec 2011 18:36:11 -0500
+
+pandas (0.5.0+git7-gcf32be2-1) unstable; urgency=low
+
+ * New upstream release with post-release fixes
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 01 Nov 2011 21:15:06 -0400
+
+pandas (0.4.3-1) unstable; urgency=low
+
+ * New upstream release(s): primarily bugfixes and optimizations but also
+ with some minor API changes and new functionality
+ * Adjusted debian/watch to match new layout on github
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 18 Oct 2011 11:27:50 -0400
+
+pandas (0.4.1-1) unstable; urgency=low
+
+ * New upstream bugfix release
+ - incorporated all debian/patches
+ * debian/rules: 'clean' removes generated pandas/version.py
+ * debian/copyright: adjusted to become DEP-5 compliant
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 25 Sep 2011 21:48:30 -0400
+
+pandas (0.4.0-1) unstable; urgency=low
+
+ * Initial Debian release (Closes: #641464)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 13 Sep 2011 12:24:05 -0400
--- /dev/null
+Description: Avoid failing when a dependency is too old
+
+(some of them are pulled in by other dependencies, so
+just not including them in d/control doesn't stop them being installed)
+
+Most of the content of this patch was generated by the following Python code:
+
+import pathlib
+import re
+
+basedir = pathlib.Path.cwd()
+if not (basedir / 'pandas/tests').exists():
+ raise FileNotFoundError('must be run from the pandas root')
+for source_file in basedir.glob('pandas/**/*.py'):
+ with open(source_file, 'r') as fd:
+ source_text = fd.read()
+ if 'pytest.importorskip' in source_text:
+ source_text = re.sub(r'pytest\.importorskip(.*)minversion', r'td.versioned_importorskip\1min_version', source_text)
+ source_text = re.sub(r'pytest\.importorskip', r'td.versioned_importorskip', source_text)
+ if '_test_decorators as td' not in source_text:
+ # add the import if it isn't already present
+ source_text, count = re.subn(r'^(import pandas|from pandas.*import)',r'import pandas.util._test_decorators as td\n\1', source_text, count=1, flags=re.MULTILINE)
+ if count != 1:
+ raise KeyError("failed to add import")
+ with open(source_file, 'w') as fd:
+ fd.write(source_text)
+
+Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
+Forwarded: no
+
+--- a/pandas/_testing/_io.py
++++ b/pandas/_testing/_io.py
+@@ -107,9 +107,9 @@ def round_trip_localpath(writer, reader,
+ pandas object
+ The original object that was serialized and then re-read.
+ """
+- import pytest
++ import pandas.util._test_decorators as td
+
+- LocalPath = pytest.importorskip("py.path").local
++ LocalPath = td.versioned_importorskip("py.path").local
+ if path is None:
+ path = "___localpath___"
+ with ensure_clean(path) as path:
+--- a/pandas/conftest.py
++++ b/pandas/conftest.py
+@@ -1816,7 +1816,7 @@ def ip():
+
+ Will raise a skip if IPython is not installed.
+ """
+- pytest.importorskip("IPython", minversion="6.0.0")
++ td.versioned_importorskip("IPython", min_version="6.0.0")
+ from IPython.core.interactiveshell import InteractiveShell
+
+ # GH#35711 make sure sqlite history file handle is not leaked
+@@ -1833,7 +1833,7 @@ def spmatrix(request):
+ """
+ Yields scipy sparse matrix classes.
+ """
+- sparse = pytest.importorskip("scipy.sparse")
++ sparse = td.versioned_importorskip("scipy.sparse")
+
+ return getattr(sparse, request.param + "_matrix")
+
+--- a/pandas/tests/apply/test_frame_apply.py
++++ b/pandas/tests/apply/test_frame_apply.py
+@@ -4,6 +4,7 @@ import warnings
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.dtypes import CategoricalDtype
+
+ import pandas as pd
+@@ -35,7 +36,7 @@ def int_frame_const_col():
+ @pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
+ def engine(request):
+ if request.param == "numba":
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ return request.param
+
+
+--- a/pandas/tests/apply/test_numba.py
++++ b/pandas/tests/apply/test_numba.py
+@@ -26,7 +26,7 @@ def test_numba_vs_python_noop(float_fram
+
+ def test_numba_vs_python_string_index():
+ # GH#56189
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ 1,
+ index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
+--- a/pandas/tests/arrays/categorical/test_warnings.py
++++ b/pandas/tests/arrays/categorical/test_warnings.py
+@@ -1,12 +1,13 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas._testing as tm
+
+
+ class TestCategoricalWarnings:
+ def test_tab_complete_warning(self, ip):
+ # https://github.com/pandas-dev/pandas/issues/16409
+- pytest.importorskip("IPython", minversion="6.0.0")
++ td.versioned_importorskip("IPython", min_version="6.0.0")
+ from IPython.core.completer import provisionalcompleter
+
+ code = "import pandas as pd; c = pd.Categorical([])"
+--- a/pandas/tests/arrays/datetimes/test_constructors.py
++++ b/pandas/tests/arrays/datetimes/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs import iNaT
+
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
+@@ -226,7 +227,7 @@ COARSE_TO_FINE_SAFE = [123, None, -123]
+ def test_from_arrow_with_different_units_and_timezones_with(
+ pa_unit, pd_unit, pa_tz, pd_tz, data
+ ):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ pa_type = pa.timestamp(pa_unit, tz=pa_tz)
+ arr = pa.array(data, type=pa_type)
+@@ -253,7 +254,7 @@ def test_from_arrow_with_different_units
+ ],
+ )
+ def test_from_arrow_from_empty(unit, tz):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ data = []
+ arr = pa.array(data)
+@@ -269,7 +270,7 @@ def test_from_arrow_from_empty(unit, tz)
+
+
+ def test_from_arrow_from_integers():
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789]
+ arr = pa.array(data)
+--- a/pandas/tests/arrays/interval/test_interval_pyarrow.py
++++ b/pandas/tests/arrays/interval/test_interval_pyarrow.py
+@@ -1,13 +1,14 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.core.arrays import IntervalArray
+
+
+ def test_arrow_extension_type():
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+
+@@ -23,7 +24,7 @@ def test_arrow_extension_type():
+
+
+ def test_arrow_array():
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+
+@@ -52,7 +53,7 @@ def test_arrow_array():
+
+
+ def test_arrow_array_missing():
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+
+@@ -89,7 +90,7 @@ def test_arrow_array_missing():
+ ids=["float", "datetime64[ns]"],
+ )
+ def test_arrow_table_roundtrip(breaks):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+
+@@ -125,7 +126,7 @@ def test_arrow_table_roundtrip(breaks):
+ ids=["float", "datetime64[ns]"],
+ )
+ def test_arrow_table_roundtrip_without_metadata(breaks):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arr = IntervalArray.from_breaks(breaks)
+ arr[1] = None
+@@ -145,7 +146,7 @@ def test_from_arrow_from_raw_struct_arra
+ # in case pyarrow lost the Interval extension type (eg on parquet roundtrip
+ # with datetime64[ns] subtype, see GH-45881), still allow conversion
+ # from arrow to IntervalArray
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}])
+ dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither")
+--- a/pandas/tests/arrays/masked/test_arrow_compat.py
++++ b/pandas/tests/arrays/masked/test_arrow_compat.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+@@ -8,7 +9,7 @@ pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask
+
+--- a/pandas/tests/arrays/period/test_arrow_compat.py
++++ b/pandas/tests/arrays/period/test_arrow_compat.py
+@@ -1,5 +1,6 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import pa_version_under10p1
+
+ from pandas.core.dtypes.dtypes import PeriodDtype
+@@ -16,7 +17,7 @@ pytestmark = pytest.mark.filterwarnings(
+ )
+
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+
+ def test_arrow_extension_type():
+--- a/pandas/tests/arrays/sparse/test_accessor.py
++++ b/pandas/tests/arrays/sparse/test_accessor.py
+@@ -3,6 +3,7 @@ import string
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import SparseDtype
+ import pandas._testing as tm
+@@ -26,7 +27,7 @@ class TestSeriesAccessor:
+ assert result == expected
+
+ def test_from_coo(self):
+- scipy_sparse = pytest.importorskip("scipy.sparse")
++ scipy_sparse = td.versioned_importorskip("scipy.sparse")
+
+ row = [0, 3, 1, 0]
+ col = [0, 3, 1, 2]
+@@ -64,7 +65,7 @@ class TestSeriesAccessor:
+ def test_to_coo(
+ self, sort_labels, expected_rows, expected_cols, expected_values_pos
+ ):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
+ index = pd.MultiIndex.from_tuples(
+@@ -107,7 +108,7 @@ class TestFrameAccessor:
+ @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
+ @pytest.mark.parametrize("dtype", ["float64", "int64"])
+ def test_from_spmatrix(self, format, labels, dtype):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item())
+
+@@ -120,7 +121,7 @@ class TestFrameAccessor:
+
+ @pytest.mark.parametrize("format", ["csc", "csr", "coo"])
+ def test_from_spmatrix_including_explicit_zero(self, format):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ mat = sp_sparse.random(10, 2, density=0.5, format=format)
+ mat.data[0] = 0
+@@ -134,7 +135,7 @@ class TestFrameAccessor:
+ [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
+ )
+ def test_from_spmatrix_columns(self, columns):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ dtype = SparseDtype("float64", 0.0)
+
+@@ -147,7 +148,7 @@ class TestFrameAccessor:
+ "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)]
+ )
+ def test_to_coo(self, colnames):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ df = pd.DataFrame(
+ {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]"
+@@ -158,7 +159,7 @@ class TestFrameAccessor:
+
+ @pytest.mark.parametrize("fill_value", [1, np.nan])
+ def test_to_coo_nonzero_fill_val_raises(self, fill_value):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = pd.DataFrame(
+ {
+ "A": SparseArray(
+@@ -174,7 +175,7 @@ class TestFrameAccessor:
+
+ def test_to_coo_midx_categorical(self):
+ # GH#50996
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ midx = pd.MultiIndex.from_arrays(
+ [
+@@ -219,7 +220,7 @@ class TestFrameAccessor:
+ @pytest.mark.parametrize("dtype", ["int64", "float64"])
+ @pytest.mark.parametrize("dense_index", [True, False])
+ def test_series_from_coo(self, dtype, dense_index):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ A = sp_sparse.eye(3, format="coo", dtype=dtype)
+ result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
+@@ -239,7 +240,7 @@ class TestFrameAccessor:
+
+ def test_series_from_coo_incorrect_format_raises(self):
+ # gh-26554
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
+ with pytest.raises(
+--- a/pandas/tests/arrays/sparse/test_constructors.py
++++ b/pandas/tests/arrays/sparse/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs.sparse import IntIndex
+
+ import pandas as pd
+@@ -188,7 +189,7 @@ class TestConstructors:
+ @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
+ @pytest.mark.parametrize("size", [0, 10])
+ def test_from_spmatrix(self, size, format):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ mat = sp_sparse.random(size, 1, density=0.5, format=format)
+ result = SparseArray.from_spmatrix(mat)
+@@ -199,7 +200,7 @@ class TestConstructors:
+
+ @pytest.mark.parametrize("format", ["coo", "csc", "csr"])
+ def test_from_spmatrix_including_explicit_zero(self, format):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ mat = sp_sparse.random(10, 1, density=0.5, format=format)
+ mat.data[0] = 0
+@@ -210,7 +211,7 @@ class TestConstructors:
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_from_spmatrix_raises(self):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ mat = sp_sparse.eye(5, 4, format="csc")
+
+--- a/pandas/tests/arrays/string_/test_string.py
++++ b/pandas/tests/arrays/string_/test_string.py
+@@ -7,6 +7,7 @@ import operator
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import pa_version_under12p0
+
+ from pandas.core.dtypes.common import is_dtype_equal
+@@ -486,7 +487,7 @@ def test_fillna_args(dtype, arrow_string
+
+ def test_arrow_array(dtype):
+ # protocol added in 0.15.0
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ import pyarrow.compute as pc
+
+ data = pd.array(["a", "b", "c"], dtype=dtype)
+@@ -502,7 +503,7 @@ def test_arrow_array(dtype):
+ @pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
+ def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string):
+ # roundtrip possible from arrow 1.0.0
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ if using_infer_string and string_storage2 != "pyarrow_numpy":
+ request.applymarker(
+@@ -532,7 +533,7 @@ def test_arrow_load_from_zero_chunks(
+ dtype, string_storage2, request, using_infer_string
+ ):
+ # GH-41040
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ if using_infer_string and string_storage2 != "pyarrow_numpy":
+ request.applymarker(
+--- a/pandas/tests/arrays/string_/test_string_arrow.py
++++ b/pandas/tests/arrays/string_/test_string_arrow.py
+@@ -19,7 +19,7 @@ from pandas.core.arrays.string_arrow imp
+
+
+ def test_eq_all_na():
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ a = pd.array([pd.NA, pd.NA], dtype=StringDtype("pyarrow"))
+ result = a == a
+ expected = pd.array([pd.NA, pd.NA], dtype="boolean[pyarrow]")
+@@ -48,7 +48,7 @@ def test_config_bad_storage_raises():
+ @pytest.mark.parametrize("chunked", [True, False])
+ @pytest.mark.parametrize("array", ["numpy", "pyarrow"])
+ def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ array = pa if array in arrow_string_storage else np
+
+@@ -69,7 +69,7 @@ def test_constructor_not_string_type_rai
+
+ @pytest.mark.parametrize("chunked", [True, False])
+ def test_constructor_not_string_type_value_dictionary_raises(chunked):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arr = pa.array([1, 2, 3], pa.dictionary(pa.int32(), pa.int32()))
+ if chunked:
+@@ -87,7 +87,7 @@ def test_constructor_not_string_type_val
+ )
+ @pytest.mark.parametrize("chunked", [True, False])
+ def test_constructor_valid_string_type_value_dictionary(chunked):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arr = pa.array(["1", "2", "3"], pa.large_string()).dictionary_encode()
+ if chunked:
+@@ -99,14 +99,14 @@ def test_constructor_valid_string_type_v
+
+ def test_constructor_from_list():
+ # GH#27673
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ result = pd.Series(["E"], dtype=StringDtype(storage="pyarrow"))
+ assert isinstance(result.dtype, StringDtype)
+ assert result.dtype.storage == "pyarrow"
+
+
+ def test_from_sequence_wrong_dtype_raises(using_infer_string):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ with pd.option_context("string_storage", "python"):
+ ArrowStringArray._from_sequence(["a", None, "c"], dtype="string")
+
+@@ -199,7 +199,7 @@ def test_pyarrow_not_installed_raises():
+ ],
+ )
+ def test_setitem(multiple_chunks, key, value, expected):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ result = pa.array(list("abcde"))
+ expected = pa.array(expected)
+@@ -216,7 +216,7 @@ def test_setitem(multiple_chunks, key, v
+
+
+ def test_setitem_invalid_indexer_raises():
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arr = ArrowStringArray(pa.array(list("abcde")))
+
+@@ -242,7 +242,7 @@ def test_setitem_invalid_indexer_raises(
+ @pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"])
+ def test_pickle_roundtrip(dtype):
+ # GH 42600
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = pd.Series(range(10), dtype=dtype)
+ expected_sliced = expected.head(2)
+ full_pickled = pickle.dumps(expected)
+@@ -259,7 +259,7 @@ def test_pickle_roundtrip(dtype):
+
+ def test_string_dtype_error_message():
+ # GH#55051
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'."
+ with pytest.raises(ValueError, match=msg):
+ StringDtype("bla")
+--- a/pandas/tests/computation/test_compat.py
++++ b/pandas/tests/computation/test_compat.py
+@@ -1,5 +1,6 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import VERSIONS
+
+ import pandas as pd
+@@ -13,7 +14,7 @@ def test_compat():
+
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
+
+- ne = pytest.importorskip("numexpr")
++ ne = td.versioned_importorskip("numexpr")
+
+ ver = ne.__version__
+ if Version(ver) < Version(VERSIONS["numexpr"]):
+@@ -26,7 +27,7 @@ def test_compat():
+ @pytest.mark.parametrize("parser", expr.PARSERS)
+ def test_invalid_numexpr_version(engine, parser):
+ if engine == "numexpr":
+- pytest.importorskip("numexpr")
++ td.versioned_importorskip("numexpr")
+ a, b = 1, 2 # noqa: F841
+ res = pd.eval("a + b", engine=engine, parser=parser)
+ assert res == 3
+--- a/pandas/tests/copy_view/test_astype.py
++++ b/pandas/tests/copy_view/test_astype.py
+@@ -45,7 +45,7 @@ def test_astype_single_dtype(using_copy_
+ @pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])
+ def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):
+ if new_dtype == "int64[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
+ df_orig = df.copy()
+ df2 = df.astype(new_dtype)
+@@ -70,7 +70,7 @@ def test_astype_avoids_copy(using_copy_o
+ @pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])
+ def test_astype_different_target_dtype(using_copy_on_write, dtype):
+ if dtype == "int32[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, 2, 3]})
+ df_orig = df.copy()
+ df2 = df.astype(dtype)
+@@ -198,7 +198,7 @@ def test_astype_different_timezones_diff
+
+
+ def test_astype_arrow_timestamp(using_copy_on_write):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "a": [
+--- a/pandas/tests/dtypes/test_common.py
++++ b/pandas/tests/dtypes/test_common.py
+@@ -214,7 +214,7 @@ def test_is_sparse(check_scipy):
+
+
+ def test_is_scipy_sparse():
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3]))
+
+--- a/pandas/tests/dtypes/test_inference.py
++++ b/pandas/tests/dtypes/test_inference.py
+@@ -28,6 +28,7 @@ import numpy as np
+ import pytest
+ import pytz
+
++import pandas.util._test_decorators as td
+ from pandas._libs import (
+ lib,
+ missing as libmissing,
+@@ -1984,7 +1985,7 @@ def test_nan_to_nat_conversions():
+
+ @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
+ def test_is_scipy_sparse(spmatrix):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ assert is_scipy_sparse(spmatrix([[0, 1]]))
+ assert not is_scipy_sparse(np.array([1]))
+
+--- a/pandas/tests/extension/test_arrow.py
++++ b/pandas/tests/extension/test_arrow.py
+@@ -62,7 +62,7 @@ from pandas.api.types import (
+ )
+ from pandas.tests.extension import base
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
+--- a/pandas/tests/extension/test_string.py
++++ b/pandas/tests/extension/test_string.py
+@@ -21,6 +21,7 @@ from typing import cast
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.api.types import is_string_dtype
+@@ -35,7 +36,7 @@ def maybe_split_array(arr, chunked):
+ elif arr.dtype.storage != "pyarrow":
+ return arr
+
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ arrow_array = arr._pa_array
+ split = len(arrow_array) // 2
+--- a/pandas/tests/frame/indexing/test_indexing.py
++++ b/pandas/tests/frame/indexing/test_indexing.py
+@@ -1945,7 +1945,7 @@ def test_adding_new_conditional_column()
+ )
+ def test_adding_new_conditional_column_with_string(dtype, infer_string) -> None:
+ # https://github.com/pandas-dev/pandas/issues/56204
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ df = DataFrame({"a": [1, 2], "b": [3, 4]})
+ with pd.option_context("future.infer_string", infer_string):
+@@ -1958,7 +1958,7 @@ def test_adding_new_conditional_column_w
+
+ def test_add_new_column_infer_string():
+ # GH#55366
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"x": [1]})
+ with pd.option_context("future.infer_string", True):
+ df.loc[df["x"] == 1, "y"] = "1"
+--- a/pandas/tests/frame/indexing/test_setitem.py
++++ b/pandas/tests/frame/indexing/test_setitem.py
+@@ -760,7 +760,7 @@ class TestDataFrameSetItem:
+
+ def test_setitem_string_option_object_index(self):
+ # GH#55638
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, 2]})
+ with pd.option_context("future.infer_string", True):
+ df["b"] = Index(["a", "b"], dtype=object)
+--- a/pandas/tests/frame/methods/test_astype.py
++++ b/pandas/tests/frame/methods/test_astype.py
+@@ -893,7 +893,7 @@ def test_frame_astype_no_copy():
+ @pytest.mark.parametrize("dtype", ["int64", "Int64"])
+ def test_astype_copies(dtype):
+ # GH#50984
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
+ result = df.astype("int64[pyarrow]", copy=True)
+ df.iloc[0, 0] = 100
+--- a/pandas/tests/frame/methods/test_convert_dtypes.py
++++ b/pandas/tests/frame/methods/test_convert_dtypes.py
+@@ -3,6 +3,7 @@ import datetime
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+@@ -49,7 +50,7 @@ class TestConvertDtypes:
+ assert result.columns.name == "cols"
+
+ def test_pyarrow_dtype_backend(self):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
+@@ -105,13 +106,13 @@ class TestConvertDtypes:
+ tm.assert_frame_equal(result, expected)
+
+ def test_pyarrow_dtype_backend_already_pyarrow(self):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]")
+ result = expected.convert_dtypes(dtype_backend="pyarrow")
+ tm.assert_frame_equal(result, expected)
+
+ def test_pyarrow_dtype_backend_from_pandas_nullable(self):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "a": pd.Series([1, 2, None], dtype="Int32"),
+@@ -135,7 +136,7 @@ class TestConvertDtypes:
+
+ def test_pyarrow_dtype_empty_object(self):
+ # GH 50970
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = pd.DataFrame(columns=[0])
+ result = expected.convert_dtypes(dtype_backend="pyarrow")
+ tm.assert_frame_equal(result, expected)
+@@ -152,7 +153,7 @@ class TestConvertDtypes:
+
+ def test_pyarrow_backend_no_conversion(self):
+ # GH#52872
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
+ expected = df.copy()
+ result = df.convert_dtypes(
+@@ -166,7 +167,7 @@ class TestConvertDtypes:
+
+ def test_convert_dtypes_pyarrow_to_np_nullable(self):
+ # GH 53648
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
+ result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+ expected = pd.DataFrame(range(2), dtype="Int32")
+@@ -174,7 +175,7 @@ class TestConvertDtypes:
+
+ def test_convert_dtypes_pyarrow_timestamp(self):
+ # GH 54191
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min"))
+ expected = ser.astype("timestamp[ms][pyarrow]")
+ result = expected.convert_dtypes(dtype_backend="pyarrow")
+--- a/pandas/tests/frame/methods/test_cov_corr.py
++++ b/pandas/tests/frame/methods/test_cov_corr.py
+@@ -105,7 +105,7 @@ class TestDataFrameCorr:
+
+ @pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
+ def test_corr_scipy_method(self, float_frame, method):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ float_frame.loc[float_frame.index[:5], "A"] = np.nan
+ float_frame.loc[float_frame.index[5:10], "B"] = np.nan
+ float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20].copy()
+@@ -126,7 +126,7 @@ class TestDataFrameCorr:
+ @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
+ def test_corr_nooverlap(self, meth):
+ # nothing in common
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(
+ {
+ "A": [1, 1.5, 1, np.nan, np.nan, np.nan],
+@@ -159,7 +159,7 @@ class TestDataFrameCorr:
+ # when dtypes of pandas series are different
+ # then ndarray will have dtype=object,
+ # so it need to be properly handled
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"a": [True, False], "b": [1, 0]})
+
+ expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
+@@ -201,7 +201,7 @@ class TestDataFrameCorr:
+ @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
+ def test_corr_nullable_integer(self, nullable_column, other_column, method):
+ # https://github.com/pandas-dev/pandas/issues/33803
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ data = DataFrame({"a": nullable_column, "b": other_column})
+ result = data.corr(method=method)
+ expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
+@@ -250,7 +250,7 @@ class TestDataFrameCorr:
+
+ @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
+ def test_corr_min_periods_greater_than_length(self, method):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"A": [1, 2], "B": [1, 2]})
+ result = df.corr(method=method, min_periods=3)
+ expected = DataFrame(
+@@ -264,7 +264,7 @@ class TestDataFrameCorr:
+ # when dtypes of pandas series are different
+ # then ndarray will have dtype=object,
+ # so it need to be properly handled
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]})
+ expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
+ if numeric_only:
+@@ -433,7 +433,7 @@ class TestDataFrameCorrWith:
+
+ def test_corrwith_spearman(self):
+ # GH#21925
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
+ result = df.corrwith(df**2, method="spearman")
+ expected = Series(np.ones(len(result)))
+@@ -441,7 +441,7 @@ class TestDataFrameCorrWith:
+
+ def test_corrwith_kendall(self):
+ # GH#21925
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
+ result = df.corrwith(df**2, method="kendall")
+ expected = Series(np.ones(len(result)))
+@@ -449,7 +449,7 @@ class TestDataFrameCorrWith:
+
+ def test_corrwith_spearman_with_tied_data(self):
+ # GH#48826
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df1 = DataFrame(
+ {
+ "A": [1, np.nan, 7, 8],
+--- a/pandas/tests/frame/methods/test_describe.py
++++ b/pandas/tests/frame/methods/test_describe.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ Categorical,
+@@ -398,7 +399,7 @@ class TestDataFrameDescribe:
+
+ def test_describe_exclude_pa_dtype(self):
+ # GH#52570
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "a": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int8())),
+--- a/pandas/tests/frame/methods/test_dot.py
++++ b/pandas/tests/frame/methods/test_dot.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -144,7 +145,7 @@ class TestDataFrameDot(DotSharedTests):
+ [("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")],
+ )
+ def test_arrow_dtype(dtype, exp_dtype):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ cols = ["a", "b"]
+ df_a = DataFrame([[1, 2], [3, 4], [5, 6]], columns=cols, dtype="int32")
+--- a/pandas/tests/frame/methods/test_info.py
++++ b/pandas/tests/frame/methods/test_info.py
+@@ -7,6 +7,7 @@ import textwrap
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import (
+ IS64,
+ PYPY,
+@@ -527,7 +528,7 @@ def test_memory_usage_empty_no_warning()
+ @pytest.mark.single_cpu
+ def test_info_compute_numba():
+ # GH#51922
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ df = DataFrame([[1, 2], [3, 4]])
+
+ with option_context("compute.use_numba", True):
+--- a/pandas/tests/frame/methods/test_interpolate.py
++++ b/pandas/tests/frame/methods/test_interpolate.py
+@@ -213,7 +213,7 @@ class TestDataFrameInterpolate:
+ df.interpolate(method="values")
+
+ def test_interp_various(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(
+ {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+ )
+@@ -252,7 +252,7 @@ class TestDataFrameInterpolate:
+ tm.assert_frame_equal(result, expected, check_dtype=False)
+
+ def test_interp_alt_scipy(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(
+ {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+ )
+@@ -541,7 +541,7 @@ class TestDataFrameInterpolate:
+ )
+ def test_interpolate_arrow(self, dtype):
+ # GH#55347
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]")
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]")
+--- a/pandas/tests/frame/methods/test_join.py
++++ b/pandas/tests/frame/methods/test_join.py
+@@ -3,6 +3,7 @@ from datetime import datetime
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import MergeError
+
+ import pandas as pd
+@@ -163,7 +164,7 @@ def test_join_on_single_col_dup_on_right
+ # GH 46622
+ # Dups on right allowed by one_to_many constraint
+ if dtype == "string[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ left_no_dup = left_no_dup.astype(dtype)
+ right_w_dups.index = right_w_dups.index.astype(dtype)
+ left_no_dup.join(
+--- a/pandas/tests/frame/methods/test_rank.py
++++ b/pandas/tests/frame/methods/test_rank.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs.algos import (
+ Infinity,
+ NegInfinity,
+@@ -39,7 +40,7 @@ class TestRank:
+ return request.param
+
+ def test_rank(self, float_frame):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ float_frame.loc[::2, "A"] = np.nan
+ float_frame.loc[::3, "B"] = np.nan
+@@ -143,7 +144,7 @@ class TestRank:
+ float_string_frame.rank(axis=1)
+
+ def test_rank_na_option(self, float_frame):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ float_frame.loc[::2, "A"] = np.nan
+ float_frame.loc[::3, "B"] = np.nan
+@@ -227,7 +228,7 @@ class TestRank:
+ @pytest.mark.parametrize("ax", [0, 1])
+ @pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"])
+ def test_rank_methods_frame(self, ax, m):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ xs = np.random.default_rng(2).integers(0, 21, (100, 26))
+ xs = (xs - 10.0) / 10.0
+@@ -503,7 +504,7 @@ class TestRank:
+ )
+ def test_rank_string_dtype(self, dtype, exp_dtype):
+ # GH#55362
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ obj = Series(["foo", "foo", None, "foo"], dtype=dtype)
+ result = obj.rank(method="first")
+ expected = Series([1, 2, None, 3], dtype=exp_dtype)
+--- a/pandas/tests/frame/test_api.py
++++ b/pandas/tests/frame/test_api.py
+@@ -5,6 +5,7 @@ import pydoc
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+ from pandas._config.config import option_context
+
+@@ -288,7 +289,7 @@ class TestDataFrameMisc:
+
+ def test_tab_complete_warning(self, ip, frame_or_series):
+ # GH 16409
+- pytest.importorskip("IPython", minversion="6.0.0")
++ td.versioned_importorskip("IPython", min_version="6.0.0")
+ from IPython.core.completer import provisionalcompleter
+
+ if frame_or_series is DataFrame:
+@@ -383,7 +384,7 @@ class TestDataFrameMisc:
+
+ def test_inspect_getmembers(self):
+ # GH38740
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+ df = DataFrame()
+ msg = "DataFrame._data is deprecated"
+ with tm.assert_produces_warning(
+--- a/pandas/tests/frame/test_arrow_interface.py
++++ b/pandas/tests/frame/test_arrow_interface.py
+@@ -6,7 +6,7 @@ import pandas.util._test_decorators as t
+
+ import pandas as pd
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+
+ @td.skip_if_no("pyarrow", min_version="14.0")
+--- a/pandas/tests/frame/test_constructors.py
++++ b/pandas/tests/frame/test_constructors.py
+@@ -2704,7 +2704,7 @@ class TestDataFrameConstructors:
+
+ def test_frame_string_inference(self):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ expected = DataFrame(
+ {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
+@@ -2739,7 +2739,7 @@ class TestDataFrameConstructors:
+
+ def test_frame_string_inference_array_string_dtype(self):
+ # GH#54496
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ expected = DataFrame(
+ {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
+@@ -2764,7 +2764,7 @@ class TestDataFrameConstructors:
+
+ def test_frame_string_inference_block_dim(self):
+ # GH#55363
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ with pd.option_context("future.infer_string", True):
+ df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
+ assert df._mgr.blocks[0].ndim == 2
+@@ -2852,7 +2852,7 @@ class TestDataFrameConstructorIndexInfer
+ )
+ def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
+ # GH 53617
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ cols = pd.arrays.ArrowExtensionArray(
+ pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
+ )
+--- a/pandas/tests/frame/test_query_eval.py
++++ b/pandas/tests/frame/test_query_eval.py
+@@ -1384,7 +1384,7 @@ class TestDataFrameQueryBacktickQuoting:
+ @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])
+ def test_query_ea_dtypes(self, dtype):
+ if dtype == "int64[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ # GH#50261
+ df = DataFrame({"a": Series([1, 2], dtype=dtype)})
+ ref = {2} # noqa: F841
+@@ -1402,7 +1402,7 @@ class TestDataFrameQueryBacktickQuoting:
+ if engine == "numexpr" and not NUMEXPR_INSTALLED:
+ pytest.skip("numexpr not installed")
+ if dtype == "int64[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)}
+ )
+--- a/pandas/tests/frame/test_reductions.py
++++ b/pandas/tests/frame/test_reductions.py
+@@ -369,7 +369,7 @@ class TestDataFrameAnalytics:
+ )
+
+ def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ def skewness(x):
+ if len(x) < 3:
+@@ -1162,7 +1162,7 @@ class TestDataFrameAnalytics:
+
+ def test_idxmax_arrow_types(self):
+ # GH#55368
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")
+ result = df.idxmax()
+@@ -2020,7 +2020,7 @@ def test_reduction_axis_none_returns_sca
+ result = getattr(df, method)(axis=None, numeric_only=numeric_only)
+ np_arr = df.to_numpy(dtype=np.float64)
+ if method in {"skew", "kurt"}:
+- comp_mod = pytest.importorskip("scipy.stats")
++ comp_mod = td.versioned_importorskip("scipy.stats")
+ if method == "kurt":
+ method = "kurtosis"
+ expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)
+--- a/pandas/tests/frame/test_repr.py
++++ b/pandas/tests/frame/test_repr.py
+@@ -7,6 +7,7 @@ from io import StringIO
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+
+ from pandas import (
+@@ -287,7 +288,7 @@ NaT 4"""
+ assert "StringCol" in repr(df)
+
+ def test_latex_repr(self):
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+ expected = r"""\begin{tabular}{llll}
+ \toprule
+ & 0 & 1 & 2 \\
+@@ -475,7 +476,7 @@ NaT 4"""
+
+ def test_repr_ea_columns(self, any_string_dtype):
+ # GH#54797
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]})
+ df.columns = df.columns.astype(any_string_dtype)
+ expected = """ long_column_name col2
+--- a/pandas/tests/frame/test_subclass.py
++++ b/pandas/tests/frame/test_subclass.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -669,7 +670,7 @@ class TestDataFrameSubclassing:
+ assert isinstance(result, tm.SubclassedSeries)
+
+ def test_corrwith(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
+ df1 = tm.SubclassedDataFrame(
+--- a/pandas/tests/frame/test_ufunc.py
++++ b/pandas/tests/frame/test_ufunc.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.api.types import is_extension_array_dtype
+@@ -250,7 +251,7 @@ def test_alignment_deprecation_many_inpu
+ # https://github.com/pandas-dev/pandas/issues/39184
+ # test that the deprecation also works with > 2 inputs -> using a numba
+ # written ufunc for this because numpy itself doesn't have such ufuncs
+- numba = pytest.importorskip("numba")
++ numba = td.versioned_importorskip("numba")
+
+ @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
+ def my_ufunc(x, y, z):
+--- a/pandas/tests/generic/test_finalize.py
++++ b/pandas/tests/generic/test_finalize.py
+@@ -7,6 +7,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+@@ -443,7 +444,7 @@ def test_finalize_last(data):
+
+ @not_implemented_mark
+ def test_finalize_called_eval_numexpr():
+- pytest.importorskip("numexpr")
++ td.versioned_importorskip("numexpr")
+ df = pd.DataFrame({"A": [1, 2]})
+ df.attrs["A"] = 1
+ result = df.eval("A + 1", engine="numexpr")
+--- a/pandas/tests/generic/test_to_xarray.py
++++ b/pandas/tests/generic/test_to_xarray.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ Categorical,
+ DataFrame,
+@@ -10,7 +11,7 @@ from pandas import (
+ )
+ import pandas._testing as tm
+
+-pytest.importorskip("xarray")
++td.versioned_importorskip("xarray")
+
+
+ class TestDataFrameToXArray:
+--- a/pandas/tests/groupby/aggregate/test_numba.py
++++ b/pandas/tests/groupby/aggregate/test_numba.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import NumbaUtilError
+
+ from pandas import (
+@@ -22,7 +23,7 @@ pytestmark = pytest.mark.single_cpu
+
+
+ def test_correct_function_signature():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def incorrect_function(x):
+ return sum(x) * 2.7
+@@ -39,7 +40,7 @@ def test_correct_function_signature():
+
+
+ def test_check_nopython_kwargs():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def incorrect_function(values, index):
+ return sum(values) * 2.7
+@@ -61,7 +62,7 @@ def test_check_nopython_kwargs():
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func_numba(values, index):
+ return np.mean(values) * 2.7
+@@ -92,7 +93,7 @@ def test_numba_vs_cython(jit, pandas_obj
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
+ # Test that the functions are cached correctly if we switch functions
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func_1(values, index):
+ return np.mean(values) - 3.4
+@@ -130,7 +131,7 @@ def test_cache(jit, pandas_obj, nogil, p
+
+
+ def test_use_global_config():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func_1(values, index):
+ return np.mean(values) - 3.4
+@@ -155,7 +156,7 @@ def test_use_global_config():
+ ],
+ )
+ def test_multifunc_numba_vs_cython_frame(agg_kwargs):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ data = DataFrame(
+ {
+ 0: ["a", "a", "b", "b", "a"],
+@@ -190,7 +191,7 @@ def test_multifunc_numba_vs_cython_frame
+ ],
+ )
+ def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ data = DataFrame(
+ {
+ 0: ["a", "a", "b", "b", "a"],
+@@ -212,7 +213,7 @@ def test_multifunc_numba_udf_frame(agg_k
+ [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
+ )
+ def test_multifunc_numba_vs_cython_series(agg_kwargs):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ labels = ["a", "a", "b", "b", "a"]
+ data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
+ grouped = data.groupby(labels)
+@@ -265,7 +266,7 @@ def test_multifunc_numba_vs_cython_serie
+ strict=False,
+ )
+ def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ labels = ["a", "a", "b", "b", "a"]
+ grouped = data.groupby(labels)
+ result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
+@@ -278,7 +279,7 @@ def test_multifunc_numba_kwarg_propagati
+
+ def test_args_not_cached():
+ # GH 41647
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def sum_last(values, index, n):
+ return values[-n:].sum()
+@@ -296,7 +297,7 @@ def test_args_not_cached():
+
+ def test_index_data_correctly_passed():
+ # GH 43133
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def f(values, index):
+ return np.mean(index)
+@@ -312,7 +313,7 @@ def test_index_data_correctly_passed():
+ def test_engine_kwargs_not_cached():
+ # If the user passes a different set of engine_kwargs don't return the same
+ # jitted function
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ nogil = True
+ parallel = False
+ nopython = True
+@@ -339,7 +340,7 @@ def test_engine_kwargs_not_cached():
+
+ @pytest.mark.filterwarnings("ignore")
+ def test_multiindex_one_key(nogil, parallel, nopython):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def numba_func(values, index):
+ return 1
+@@ -354,7 +355,7 @@ def test_multiindex_one_key(nogil, paral
+
+
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def numba_func(values, index):
+ return 1
+@@ -368,7 +369,7 @@ def test_multiindex_multi_key_not_suppor
+
+
+ def test_multilabel_numba_vs_cython(numba_supported_reductions):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ reduction, kwargs = numba_supported_reductions
+ df = DataFrame(
+ {
+@@ -389,7 +390,7 @@ def test_multilabel_numba_vs_cython(numb
+
+
+ def test_multilabel_udf_numba_vs_cython():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ df = DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+--- a/pandas/tests/groupby/test_counting.py
++++ b/pandas/tests/groupby/test_counting.py
+@@ -4,6 +4,7 @@ from string import ascii_lowercase
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Index,
+@@ -385,7 +386,7 @@ def test_count_uses_size_on_exception():
+
+ def test_count_arrow_string_array(any_string_dtype):
+ # GH#54751
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)}
+ )
+--- a/pandas/tests/groupby/test_groupby.py
++++ b/pandas/tests/groupby/test_groupby.py
+@@ -2596,7 +2596,7 @@ def test_groupby_column_index_name_lost(
+ def test_groupby_duplicate_columns(infer_string):
+ # GH: 31735
+ if infer_string:
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
+ ).astype(object)
+--- a/pandas/tests/groupby/test_numba.py
++++ b/pandas/tests/groupby/test_numba.py
+@@ -1,5 +1,6 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+
+ pytestmark = pytest.mark.single_cpu
+
+-pytest.importorskip("numba")
++td.versioned_importorskip("numba")
+
+
+ @pytest.mark.filterwarnings("ignore")
+--- a/pandas/tests/groupby/test_reductions.py
++++ b/pandas/tests/groupby/test_reductions.py
+@@ -701,7 +701,7 @@ def test_groupby_min_max_categorical(fun
+ @pytest.mark.parametrize("func", ["min", "max"])
+ def test_min_empty_string_dtype(func):
+ # GH#55619
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
+ result = getattr(df.groupby("a"), func)()
+--- a/pandas/tests/groupby/test_timegrouper.py
++++ b/pandas/tests/groupby/test_timegrouper.py
+@@ -10,6 +10,7 @@ import numpy as np
+ import pytest
+ import pytz
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -944,7 +945,7 @@ class TestGroupBy:
+ def test_groupby_agg_numba_timegrouper_with_nat(
+ self, groupby_with_truncated_bingrouper
+ ):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ # See discussion in GH#43487
+ gb = groupby_with_truncated_bingrouper
+--- a/pandas/tests/groupby/transform/test_numba.py
++++ b/pandas/tests/groupby/transform/test_numba.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import NumbaUtilError
+
+ from pandas import (
+@@ -14,7 +15,7 @@ pytestmark = pytest.mark.single_cpu
+
+
+ def test_correct_function_signature():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def incorrect_function(x):
+ return x + 1
+@@ -31,7 +32,7 @@ def test_correct_function_signature():
+
+
+ def test_check_nopython_kwargs():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def incorrect_function(values, index):
+ return values + 1
+@@ -53,7 +54,7 @@ def test_check_nopython_kwargs():
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func(values, index):
+ return values + 1
+@@ -84,7 +85,7 @@ def test_numba_vs_cython(jit, pandas_obj
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
+ # Test that the functions are cached correctly if we switch functions
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func_1(values, index):
+ return values + 1
+@@ -121,7 +122,7 @@ def test_cache(jit, pandas_obj, nogil, p
+
+
+ def test_use_global_config():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def func_1(values, index):
+ return values + 1
+@@ -141,7 +142,7 @@ def test_use_global_config():
+ "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}]
+ )
+ def test_string_cython_vs_numba(agg_func, numba_supported_reductions):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ agg_func, kwargs = numba_supported_reductions
+ data = DataFrame(
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
+@@ -159,7 +160,7 @@ def test_string_cython_vs_numba(agg_func
+
+ def test_args_not_cached():
+ # GH 41647
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def sum_last(values, index, n):
+ return values[-n:].sum()
+@@ -177,7 +178,7 @@ def test_args_not_cached():
+
+ def test_index_data_correctly_passed():
+ # GH 43133
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def f(values, index):
+ return index - 1
+@@ -191,7 +192,7 @@ def test_index_data_correctly_passed():
+ def test_engine_kwargs_not_cached():
+ # If the user passes a different set of engine_kwargs don't return the same
+ # jitted function
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ nogil = True
+ parallel = False
+ nopython = True
+@@ -218,7 +219,7 @@ def test_engine_kwargs_not_cached():
+
+ @pytest.mark.filterwarnings("ignore")
+ def test_multiindex_one_key(nogil, parallel, nopython):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def numba_func(values, index):
+ return 1
+@@ -233,7 +234,7 @@ def test_multiindex_one_key(nogil, paral
+
+
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+
+ def numba_func(values, index):
+ return 1
+@@ -247,7 +248,7 @@ def test_multiindex_multi_key_not_suppor
+
+
+ def test_multilabel_numba_vs_cython(numba_supported_reductions):
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ reduction, kwargs = numba_supported_reductions
+ df = DataFrame(
+ {
+@@ -264,7 +265,7 @@ def test_multilabel_numba_vs_cython(numb
+
+
+ def test_multilabel_udf_numba_vs_cython():
+- pytest.importorskip("numba")
++ td.versioned_importorskip("numba")
+ df = DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+--- a/pandas/tests/indexes/base_class/test_constructors.py
++++ b/pandas/tests/indexes/base_class/test_constructors.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ Index,
+@@ -47,7 +48,7 @@ class TestIndexConstructor:
+
+ def test_index_string_inference(self):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ expected = Index(["a", "b"], dtype=dtype)
+ with pd.option_context("future.infer_string", True):
+--- a/pandas/tests/indexes/base_class/test_reshape.py
++++ b/pandas/tests/indexes/base_class/test_reshape.py
+@@ -4,6 +4,7 @@ Tests for ndarray-like method on the bas
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import Index
+ import pandas._testing as tm
+
+@@ -58,7 +59,7 @@ class TestReshape:
+
+ def test_insert_none_into_string_numpy(self):
+ # GH#55365
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]")
+ result = index.insert(-1, None)
+ expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]")
+--- a/pandas/tests/indexes/multi/test_constructors.py
++++ b/pandas/tests/indexes/multi/test_constructors.py
+@@ -7,6 +7,7 @@ import itertools
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
+
+ import pandas as pd
+@@ -648,7 +649,7 @@ def test_from_frame():
+
+ def test_from_frame_missing_values_multiIndex():
+ # GH 39984
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ df = pd.DataFrame(
+ {
+--- a/pandas/tests/indexes/numeric/test_indexing.py
++++ b/pandas/tests/indexes/numeric/test_indexing.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import InvalidIndexError
+
+ from pandas import (
+@@ -385,7 +386,7 @@ class TestGetIndexer:
+ def test_get_indexer_masked_na_boolean(self, dtype):
+ # GH#39133
+ if dtype == "bool[pyarrow]":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ idx = Index([True, False, NA], dtype=dtype)
+ result = idx.get_loc(False)
+ assert result == 1
+@@ -393,7 +394,7 @@ class TestGetIndexer:
+ assert result == 2
+
+ def test_get_indexer_arrow_dictionary_target(self):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ target = Index(
+ ArrowExtensionArray(
+ pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8()))
+--- a/pandas/tests/indexes/test_base.py
++++ b/pandas/tests/indexes/test_base.py
+@@ -1285,7 +1285,7 @@ class TestIndex:
+
+ def test_tab_complete_warning(self, ip):
+ # https://github.com/pandas-dev/pandas/issues/16409
+- pytest.importorskip("IPython", minversion="6.0.0")
++ td.versioned_importorskip("IPython", min_version="6.0.0")
+ from IPython.core.completer import provisionalcompleter
+
+ code = "import pandas as pd; idx = pd.Index([1, 2])"
+--- a/pandas/tests/indexing/test_datetime.py
++++ b/pandas/tests/indexing/test_datetime.py
+@@ -2,6 +2,7 @@ import re
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -174,7 +175,7 @@ class TestDatetimeIndex:
+
+ def test_getitem_pyarrow_index(self, frame_or_series):
+ # GH 53644
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ obj = frame_or_series(
+ range(5),
+ index=date_range("2020", freq="D", periods=5).astype(
+--- a/pandas/tests/indexing/test_loc.py
++++ b/pandas/tests/indexing/test_loc.py
+@@ -1308,7 +1308,7 @@ class TestLocBaseIndependent:
+ @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
+ @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
+ def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ spmatrix_t = getattr(sp_sparse, spmatrix_t)
+
+@@ -1337,7 +1337,7 @@ class TestLocBaseIndependent:
+
+ def test_loc_getitem_sparse_frame(self):
+ # GH34687
+- sp_sparse = pytest.importorskip("scipy.sparse")
++ sp_sparse = td.versioned_importorskip("scipy.sparse")
+
+ df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5))
+ result = df.loc[range(2)]
+@@ -3078,7 +3078,7 @@ def test_loc_periodindex_3_levels():
+
+ def test_loc_setitem_pyarrow_strings():
+ # GH#52319
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "strings": Series(["A", "B", "C"], dtype="string[pyarrow]"),
+--- a/pandas/tests/interchange/test_impl.py
++++ b/pandas/tests/interchange/test_impl.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs.tslibs import iNaT
+ from pandas.compat import (
+ is_ci_environment,
+@@ -67,7 +68,7 @@ def test_categorical_dtype(data, data_ca
+
+ def test_categorical_pyarrow():
+ # GH 49889
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+
+ arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]
+ table = pa.table({"weekday": pa.array(arr).dictionary_encode()})
+@@ -82,7 +83,7 @@ def test_categorical_pyarrow():
+
+ def test_empty_categorical_pyarrow():
+ # https://github.com/pandas-dev/pandas/issues/53077
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+
+ arr = [None]
+ table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()})
+@@ -94,7 +95,7 @@ def test_empty_categorical_pyarrow():
+
+ def test_large_string_pyarrow():
+ # GH 52795
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+
+ arr = ["Mon", "Tue"]
+ table = pa.table({"weekday": pa.array(arr, "large_string")})
+@@ -120,7 +121,7 @@ def test_large_string_pyarrow():
+ )
+ def test_bitmasks_pyarrow(offset, length, expected_values):
+ # GH 52795
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+
+ arr = [3.3, None, 2.1]
+ table = pa.table({"arr": arr}).slice(offset, length)
+@@ -282,7 +283,7 @@ def test_categorical_to_numpy_dlpack():
+ @pytest.mark.parametrize("data", [{}, {"a": []}])
+ def test_empty_pyarrow(data):
+ # GH 53155
+- pytest.importorskip("pyarrow", "11.0.0")
++ td.versioned_importorskip("pyarrow", "11.0.0")
+ from pyarrow.interchange import from_dataframe as pa_from_dataframe
+
+ expected = pd.DataFrame(data)
+@@ -292,7 +293,7 @@ def test_empty_pyarrow(data):
+
+
+ def test_multi_chunk_pyarrow() -> None:
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+ n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]])
+ names = ["n_legs"]
+ table = pa.table([n_legs], names=names)
+@@ -305,7 +306,7 @@ def test_multi_chunk_pyarrow() -> None:
+
+
+ def test_multi_chunk_column() -> None:
+- pytest.importorskip("pyarrow", "11.0.0")
++ td.versioned_importorskip("pyarrow", "11.0.0")
+ ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]")
+ df = pd.concat([ser, ser], ignore_index=True).to_frame("a")
+ df_orig = df.copy()
+@@ -327,7 +328,7 @@ def test_multi_chunk_column() -> None:
+
+ def test_timestamp_ns_pyarrow():
+ # GH 56712
+- pytest.importorskip("pyarrow", "11.0.0")
++ td.versioned_importorskip("pyarrow", "11.0.0")
+ timestamp_args = {
+ "year": 2000,
+ "month": 1,
+@@ -362,7 +363,7 @@ def test_datetimetzdtype(tz, unit):
+
+ def test_interchange_from_non_pandas_tz_aware(request):
+ # GH 54239, 54287
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+ import pyarrow.compute as pc
+
+ if is_platform_windows() and is_ci_environment():
+@@ -420,7 +421,7 @@ def test_empty_string_column():
+
+ def test_large_string():
+ # GH#56702
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+ result = pd.api.interchange.from_dataframe(df.__dataframe__())
+ expected = pd.DataFrame({"a": ["x"]}, dtype="object")
+@@ -500,7 +501,7 @@ def test_pandas_nullable_with_missing_va
+ ) -> None:
+ # https://github.com/pandas-dev/pandas/issues/57643
+ # https://github.com/pandas-dev/pandas/issues/57664
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+ import pyarrow.interchange as pai
+
+ if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+@@ -562,7 +563,7 @@ def test_pandas_nullable_without_missing
+ data: list, dtype: str, expected_dtype: str
+ ) -> None:
+ # https://github.com/pandas-dev/pandas/issues/57643
+- pa = pytest.importorskip("pyarrow", "11.0.0")
++ pa = td.versioned_importorskip("pyarrow", "11.0.0")
+ import pyarrow.interchange as pai
+
+ if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+@@ -578,7 +579,7 @@ def test_pandas_nullable_without_missing
+
+ def test_string_validity_buffer() -> None:
+ # https://github.com/pandas-dev/pandas/issues/57761
+- pytest.importorskip("pyarrow", "11.0.0")
++ td.versioned_importorskip("pyarrow", "11.0.0")
+ df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+ result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+ assert result is None
+@@ -586,7 +587,7 @@ def test_string_validity_buffer() -> Non
+
+ def test_string_validity_buffer_no_missing() -> None:
+ # https://github.com/pandas-dev/pandas/issues/57762
+- pytest.importorskip("pyarrow", "11.0.0")
++ td.versioned_importorskip("pyarrow", "11.0.0")
+ df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]")
+ validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+ assert validity is not None
+--- a/pandas/tests/interchange/test_utils.py
++++ b/pandas/tests/interchange/test_utils.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
+
+@@ -78,7 +79,7 @@ def test_dtype_to_arrow_c_fmt(pandas_dty
+ )
+ def test_dtype_to_arrow_c_fmt_arrowdtype(pa_dtype, args_kwargs, c_string):
+ # GH 52323
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ if not args_kwargs:
+ pa_type = getattr(pa, pa_dtype)()
+ elif isinstance(args_kwargs, tuple):
+--- a/pandas/tests/io/conftest.py
++++ b/pandas/tests/io/conftest.py
+@@ -58,8 +58,8 @@ def s3_base(worker_id, monkeypatch):
+ Sets up moto server in separate process locally
+ Return url for motoserver/moto CI service
+ """
+- pytest.importorskip("s3fs")
+- pytest.importorskip("boto3")
++ td.versioned_importorskip("s3fs")
++ td.versioned_importorskip("boto3")
+
+ # temporary workaround as moto fails for botocore >= 1.11 otherwise,
+ # see https://github.com/spulec/moto/issues/1924 & 1952
+@@ -80,9 +80,9 @@ def s3_base(worker_id, monkeypatch):
+ # set in .github/workflows/unit-tests.yml
+ yield "http://localhost:5000"
+ else:
+- requests = pytest.importorskip("requests")
+- pytest.importorskip("moto")
+- pytest.importorskip("flask") # server mode needs flask too
++ requests = td.versioned_importorskip("requests")
++ td.versioned_importorskip("moto")
++ td.versioned_importorskip("flask") # server mode needs flask too
+
+ # Launching moto in server mode, i.e., as a separate process
+ # with an S3 endpoint on localhost
+--- a/pandas/tests/io/excel/test_odf.py
++++ b/pandas/tests/io/excel/test_odf.py
+@@ -3,12 +3,13 @@ import functools
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+ import pandas._testing as tm
+
+-pytest.importorskip("odf")
++td.versioned_importorskip("odf")
+
+ if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_odswriter.py
++++ b/pandas/tests/io/excel/test_odswriter.py
+@@ -6,6 +6,7 @@ import re
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+@@ -13,7 +14,7 @@ import pandas._testing as tm
+
+ from pandas.io.excel import ExcelWriter
+
+-odf = pytest.importorskip("odf")
++odf = td.versioned_importorskip("odf")
+
+ if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_openpyxl.py
++++ b/pandas/tests/io/excel/test_openpyxl.py
+@@ -5,6 +5,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+@@ -17,7 +18,7 @@ from pandas.io.excel import (
+ )
+ from pandas.io.excel._openpyxl import OpenpyxlReader
+
+-openpyxl = pytest.importorskip("openpyxl")
++openpyxl = td.versioned_importorskip("openpyxl")
+
+ if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_readers.py
++++ b/pandas/tests/io/excel/test_readers.py
+@@ -667,7 +667,7 @@ class TestReaders:
+ if read_ext in (".xlsb", ".xls"):
+ pytest.skip(f"No engine for filetype: '{read_ext}'")
+
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ with pd.option_context("mode.string_storage", string_storage):
+ df = DataFrame(
+--- a/pandas/tests/io/excel/test_style.py
++++ b/pandas/tests/io/excel/test_style.py
+@@ -16,7 +16,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelWriter
+ from pandas.io.formats.excel import ExcelFormatter
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ # jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
+ # could compute styles and render to excel without jinja2, since there is no
+ # 'template' file, but this needs the import error to delayed until render time.
+@@ -41,14 +41,14 @@ def assert_equal_cell_styles(cell1, cell
+ )
+ def test_styler_to_excel_unstyled(engine):
+ # compare DataFrame.to_excel and Styler.to_excel when no styles applied
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine=engine) as writer:
+ df.to_excel(writer, sheet_name="dataframe")
+ df.style.to_excel(writer, sheet_name="unstyled")
+
+- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
++ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
+ assert len(col1) == len(col2)
+@@ -133,7 +133,7 @@ shared_style_params = [
+ )
+ @pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+ def test_styler_to_excel_basic(engine, css, attrs, expected):
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+ styler = df.style.map(lambda x: css)
+
+@@ -142,7 +142,7 @@ def test_styler_to_excel_basic(engine, c
+ df.to_excel(writer, sheet_name="dataframe")
+ styler.to_excel(writer, sheet_name="styled")
+
+- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
++ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test unstyled data cell does not have expected styles
+ # test styled cell has expected styles
+@@ -164,7 +164,7 @@ def test_styler_to_excel_basic(engine, c
+ )
+ @pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+ def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+
+ styler = df.style
+@@ -181,7 +181,7 @@ def test_styler_to_excel_basic_indexes(e
+ null_styler.to_excel(writer, sheet_name="null_styled")
+ styler.to_excel(writer, sheet_name="styled")
+
+- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
++ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test null styled index cells does not have expected styles
+ # test styled cell has expected styles
+@@ -233,7 +233,7 @@ def test_styler_to_excel_border_style(en
+ attrs = ["border", "left", "style"]
+ expected = border_style
+
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+ styler = df.style.map(lambda x: css)
+
+@@ -242,7 +242,7 @@ def test_styler_to_excel_border_style(en
+ df.to_excel(writer, sheet_name="dataframe")
+ styler.to_excel(writer, sheet_name="styled")
+
+- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
++ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test unstyled data cell does not have expected styles
+ # test styled cell has expected styles
+@@ -259,7 +259,7 @@ def test_styler_to_excel_border_style(en
+
+
+ def test_styler_custom_converter():
+- openpyxl = pytest.importorskip("openpyxl")
++ openpyxl = td.versioned_importorskip("openpyxl")
+
+ def custom_converter(css):
+ return {"font": {"color": {"rgb": "111222"}}}
+--- a/pandas/tests/io/excel/test_xlrd.py
++++ b/pandas/tests/io/excel/test_xlrd.py
+@@ -3,6 +3,7 @@ import io
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+@@ -11,7 +12,7 @@ import pandas._testing as tm
+ from pandas.io.excel import ExcelFile
+ from pandas.io.excel._base import inspect_excel_format
+
+-xlrd = pytest.importorskip("xlrd")
++xlrd = td.versioned_importorskip("xlrd")
+
+ if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+--- a/pandas/tests/io/excel/test_xlsxwriter.py
++++ b/pandas/tests/io/excel/test_xlsxwriter.py
+@@ -2,6 +2,7 @@ import contextlib
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_windows
+
+ from pandas import DataFrame
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+
+ from pandas.io.excel import ExcelWriter
+
+-xlsxwriter = pytest.importorskip("xlsxwriter")
++xlsxwriter = td.versioned_importorskip("xlsxwriter")
+
+ if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+@@ -23,7 +24,7 @@ def ext():
+ def test_column_format(ext):
+ # Test that column formats are applied to cells. Test for issue #9167.
+ # Applicable to xlsxwriter only.
+- openpyxl = pytest.importorskip("openpyxl")
++ openpyxl = td.versioned_importorskip("openpyxl")
+
+ with tm.ensure_clean(ext) as path:
+ frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
+--- a/pandas/tests/io/formats/style/test_bar.py
++++ b/pandas/tests/io/formats/style/test_bar.py
+@@ -3,13 +3,14 @@ import io
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ NA,
+ DataFrame,
+ read_csv,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+
+
+ def bar_grad(a=None, b=None, c=None, d=None):
+--- a/pandas/tests/io/formats/style/test_exceptions.py
++++ b/pandas/tests/io/formats/style/test_exceptions.py
+@@ -1,6 +1,7 @@
+ import pytest
+
+-jinja2 = pytest.importorskip("jinja2")
++import pandas.util._test_decorators as td
++jinja2 = td.versioned_importorskip("jinja2")
+
+ from pandas import (
+ DataFrame,
+--- a/pandas/tests/io/formats/style/test_format.py
++++ b/pandas/tests/io/formats/style/test_format.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ NA,
+ DataFrame,
+@@ -11,7 +12,7 @@ from pandas import (
+ option_context,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+ from pandas.io.formats.style_render import _str_escape
+
+--- a/pandas/tests/io/formats/style/test_highlight.py
++++ b/pandas/tests/io/formats/style/test_highlight.py
+@@ -1,13 +1,14 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ NA,
+ DataFrame,
+ IndexSlice,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+
+ from pandas.io.formats.style import Styler
+
+--- a/pandas/tests/io/formats/style/test_html.py
++++ b/pandas/tests/io/formats/style/test_html.py
+@@ -6,13 +6,14 @@ from textwrap import (
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ MultiIndex,
+ option_context,
+ )
+
+-jinja2 = pytest.importorskip("jinja2")
++jinja2 = td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+
+
+--- a/pandas/tests/io/formats/style/test_matplotlib.py
++++ b/pandas/tests/io/formats/style/test_matplotlib.py
+@@ -3,14 +3,15 @@ import gc
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ IndexSlice,
+ Series,
+ )
+
+-pytest.importorskip("matplotlib")
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("matplotlib")
++td.versioned_importorskip("jinja2")
+
+ import matplotlib as mpl
+
+@@ -23,9 +24,9 @@ def mpl_cleanup():
+ # 1) Resets units registry
+ # 2) Resets rc_context
+ # 3) Closes all figures
+- mpl = pytest.importorskip("matplotlib")
+- mpl_units = pytest.importorskip("matplotlib.units")
+- plt = pytest.importorskip("matplotlib.pyplot")
++ mpl = td.versioned_importorskip("matplotlib")
++ mpl_units = td.versioned_importorskip("matplotlib.units")
++ plt = td.versioned_importorskip("matplotlib.pyplot")
+ orig_units_registry = mpl_units.registry.copy()
+ with mpl.rc_context():
+ mpl.use("template")
+--- a/pandas/tests/io/formats/style/test_non_unique.py
++++ b/pandas/tests/io/formats/style/test_non_unique.py
+@@ -2,12 +2,13 @@ from textwrap import dedent
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ IndexSlice,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+
+ from pandas.io.formats.style import Styler
+
+--- a/pandas/tests/io/formats/style/test_style.py
++++ b/pandas/tests/io/formats/style/test_style.py
+@@ -16,7 +16,7 @@ from pandas import (
+ import pandas._testing as tm
+ import pandas.util._test_decorators as td
+
+-jinja2 = pytest.importorskip("jinja2")
++jinja2 = td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import ( # isort:skip
+ Styler,
+ )
+--- a/pandas/tests/io/formats/style/test_to_latex.py
++++ b/pandas/tests/io/formats/style/test_to_latex.py
+@@ -3,6 +3,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ MultiIndex,
+@@ -10,7 +11,7 @@ from pandas import (
+ option_context,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+ from pandas.io.formats.style_render import (
+ _parse_latex_cell_styles,
+--- a/pandas/tests/io/formats/style/test_to_string.py
++++ b/pandas/tests/io/formats/style/test_to_string.py
+@@ -2,12 +2,13 @@ from textwrap import dedent
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+ )
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+
+
+--- a/pandas/tests/io/formats/style/test_tooltip.py
++++ b/pandas/tests/io/formats/style/test_tooltip.py
+@@ -1,9 +1,10 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+ from pandas.io.formats.style import Styler
+
+
+--- a/pandas/tests/io/formats/test_format.py
++++ b/pandas/tests/io/formats/test_format.py
+@@ -11,6 +11,7 @@ from shutil import get_terminal_size
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+
+ import pandas as pd
+@@ -2268,7 +2269,7 @@ def test_filepath_or_buffer_arg(
+ ):
+ df = DataFrame([data])
+ if method in ["to_latex"]: # uses styler implementation
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+
+ if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
+ with pytest.raises(
+@@ -2287,7 +2288,7 @@ def test_filepath_or_buffer_arg(
+ @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+ def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+ if method in ["to_latex"]: # uses styler implementation
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+ msg = "buf is not a file name and it has no write method"
+ with pytest.raises(TypeError, match=msg):
+ getattr(float_frame, method)(buf=object())
+--- a/pandas/tests/io/formats/test_to_excel.py
++++ b/pandas/tests/io/formats/test_to_excel.py
+@@ -6,6 +6,7 @@ import string
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import CSSWarning
+
+ import pandas._testing as tm
+@@ -336,7 +337,7 @@ def tests_css_named_colors_valid():
+
+
+ def test_css_named_colors_from_mpl_present():
+- mpl_colors = pytest.importorskip("matplotlib.colors")
++ mpl_colors = td.versioned_importorskip("matplotlib.colors")
+
+ pd_colors = CSSToExcelConverter.NAMED_COLORS
+ for name, color in mpl_colors.CSS4_COLORS.items():
+--- a/pandas/tests/io/formats/test_to_latex.py
++++ b/pandas/tests/io/formats/test_to_latex.py
+@@ -4,6 +4,7 @@ from textwrap import dedent
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -11,7 +12,7 @@ from pandas import (
+ )
+ import pandas._testing as tm
+
+-pytest.importorskip("jinja2")
++td.versioned_importorskip("jinja2")
+
+
+ def _dedent(string):
+--- a/pandas/tests/io/formats/test_to_markdown.py
++++ b/pandas/tests/io/formats/test_to_markdown.py
+@@ -5,10 +5,11 @@ from io import (
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+-pytest.importorskip("tabulate")
++td.versioned_importorskip("tabulate")
+
+
+ def test_simple():
+--- a/pandas/tests/io/formats/test_to_string.py
++++ b/pandas/tests/io/formats/test_to_string.py
+@@ -10,6 +10,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+
+ from pandas import (
+@@ -748,7 +749,7 @@ class TestDataFrameToString:
+
+ def test_to_string_string_dtype(self):
+ # GH#50099
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]}
+ )
+--- a/pandas/tests/io/json/test_pandas.py
++++ b/pandas/tests/io/json/test_pandas.py
+@@ -2034,7 +2034,7 @@ class TestPandasContainer:
+ self, string_storage, dtype_backend, orient, using_infer_string
+ ):
+ # GH#50750
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "a": Series([1, np.nan, 3], dtype="Int64"),
+@@ -2056,7 +2056,7 @@ class TestPandasContainer:
+ string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+
+ elif dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+@@ -2103,7 +2103,7 @@ class TestPandasContainer:
+ @pytest.mark.parametrize("orient", ["split", "records", "index"])
+ def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
+ # GH#50750
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ ser = Series([1, np.nan, 3], dtype="Int64")
+
+ out = ser.to_json(orient=orient)
+@@ -2147,7 +2147,7 @@ def test_pyarrow_engine_lines_false():
+
+
+ def test_json_roundtrip_string_inference(orient):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
+ )
+--- a/pandas/tests/io/json/test_readlines.py
++++ b/pandas/tests/io/json/test_readlines.py
+@@ -5,6 +5,7 @@ from pathlib import Path
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -28,7 +29,7 @@ def lines_json_df():
+ @pytest.fixture(params=["ujson", "pyarrow"])
+ def engine(request):
+ if request.param == "pyarrow":
+- pytest.importorskip("pyarrow.json")
++ td.versioned_importorskip("pyarrow.json")
+ return request.param
+
+
+--- a/pandas/tests/io/parser/conftest.py
++++ b/pandas/tests/io/parser/conftest.py
+@@ -4,6 +4,7 @@ import os
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import VERSIONS
+
+ from pandas import (
+@@ -135,7 +136,7 @@ def all_parsers(request):
+ """
+ parser = request.param()
+ if parser.engine == "pyarrow":
+- pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
++ td.versioned_importorskip("pyarrow", VERSIONS["pyarrow"])
+ # Try finding a way to disable threads all together
+ # for more stable CI runs
+ import pyarrow
+--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
++++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+@@ -8,6 +8,7 @@ from io import StringIO
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import ParserWarning
+
+ import pandas as pd
+@@ -460,7 +461,7 @@ def test_dtype_backend_and_dtype(all_par
+
+ def test_dtype_backend_string(all_parsers, string_storage):
+ # GH#36712
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ with pd.option_context("mode.string_storage", string_storage):
+ parser = all_parsers
+@@ -503,7 +504,7 @@ def test_dtype_backend_ea_dtype_specifie
+
+ def test_dtype_backend_pyarrow(all_parsers, request):
+ # GH#36712
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ parser = all_parsers
+
+ data = """a,b,c,d,e,f,g,h,i,j
+@@ -556,7 +557,7 @@ def test_ea_int_avoid_overflow(all_parse
+
+ def test_string_inference(all_parsers):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+
+ data = """a,b
+@@ -577,7 +578,7 @@ y,2
+ @pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])
+ def test_string_inference_object_dtype(all_parsers, dtype):
+ # GH#56047
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ data = """a,b
+ x,a
+--- a/pandas/tests/io/parser/test_concatenate_chunks.py
++++ b/pandas/tests/io/parser/test_concatenate_chunks.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import DtypeWarning
+
+ import pandas._testing as tm
+@@ -11,7 +12,7 @@ from pandas.io.parsers.c_parser_wrapper
+
+ def test_concatenate_chunks_pyarrow():
+ # GH#51876
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ chunks = [
+ {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+ {0: ArrowExtensionArray(pa.array([1, 2]))},
+@@ -23,7 +24,7 @@ def test_concatenate_chunks_pyarrow():
+
+ def test_concatenate_chunks_pyarrow_strings():
+ # GH#51876
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ chunks = [
+ {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+ {0: ArrowExtensionArray(pa.array(["a", "b"]))},
+--- a/pandas/tests/io/parser/test_network.py
++++ b/pandas/tests/io/parser/test_network.py
+@@ -80,7 +80,7 @@ class TestS3:
+ def test_parse_public_s3_bucket(self, s3_public_bucket_with_data, tips_df, s3so):
+ # more of an integration test due to the not-public contents portion
+ # can probably mock this though.
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
+ df = read_csv(
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,
+@@ -93,7 +93,7 @@ class TestS3:
+
+ def test_parse_private_s3_bucket(self, s3_private_bucket_with_data, tips_df, s3so):
+ # Read public file from bucket with not-public contents
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ df = read_csv(
+ f"s3://{s3_private_bucket_with_data.name}/tips.csv", storage_options=s3so
+ )
+@@ -258,7 +258,7 @@ class TestS3:
+ def test_write_s3_parquet_fails(self, tips_df, s3so):
+ # GH 27679
+ # Attempting to write to an invalid S3 path should raise
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ import botocore
+
+ # GH 34087
+@@ -318,7 +318,7 @@ class TestS3:
+ self, s3_public_bucket_with_data, feather_file, s3so
+ ):
+ # GH 29055
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = read_feather(feather_file)
+ res = read_feather(
+ f"s3://{s3_public_bucket_with_data.name}/simple_dataset.feather",
+--- a/pandas/tests/io/parser/test_python_parser_only.py
++++ b/pandas/tests/io/parser/test_python_parser_only.py
+@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import (
+ ParserError,
+ ParserWarning,
+@@ -167,7 +168,7 @@ def test_decompression_regex_sep(python_
+ data = data.replace(b",", b"::")
+ expected = parser.read_csv(csv1)
+
+- module = pytest.importorskip(compression)
++ module = td.versioned_importorskip(compression)
+ klass = getattr(module, klass)
+
+ with tm.ensure_clean() as path:
+--- a/pandas/tests/io/parser/test_read_fwf.py
++++ b/pandas/tests/io/parser/test_read_fwf.py
+@@ -14,6 +14,7 @@ from pathlib import Path
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import EmptyDataError
+
+ import pandas as pd
+@@ -972,13 +973,13 @@ def test_dtype_backend(string_storage, d
+ arr = StringArray(np.array(["a", "b"], dtype=np.object_))
+ arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ arr = ArrowExtensionArray(pa.array(["a", "b"]))
+ arr_na = ArrowExtensionArray(pa.array([None, "a"]))
+ else:
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ arr = ArrowStringArray(pa.array(["a", "b"]))
+ arr_na = ArrowStringArray(pa.array([None, "a"]))
+
+@@ -1002,7 +1003,7 @@ def test_dtype_backend(string_storage, d
+ }
+ )
+ if dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = DataFrame(
+--- a/pandas/tests/io/parser/test_upcast.py
++++ b/pandas/tests/io/parser/test_upcast.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs.parsers import (
+ _maybe_upcast,
+ na_values,
+@@ -87,7 +88,7 @@ def test_maybe_upcaste_all_nan():
+ @pytest.mark.parametrize("val", [na_values[np.object_], "c"])
+ def test_maybe_upcast_object(val, string_storage):
+ # GH#36712
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ with pd.option_context("mode.string_storage", string_storage):
+ arr = np.array(["a", "b", val], dtype=np.object_)
+--- a/pandas/tests/io/pytables/common.py
++++ b/pandas/tests/io/pytables/common.py
+@@ -5,9 +5,10 @@ import tempfile
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.io.pytables import HDFStore
+
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+ # set these parameters so we don't have file sharing
+ tables.parameters.MAX_NUMEXPR_THREADS = 1
+ tables.parameters.MAX_BLOSC_THREADS = 1
+--- a/pandas/tests/io/pytables/test_append.py
++++ b/pandas/tests/io/pytables/test_append.py
+@@ -29,7 +29,7 @@ is_crashing_arch=bool((platform.uname()[
+
+ pytestmark = pytest.mark.single_cpu
+
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+
+
+ @pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
+--- a/pandas/tests/io/pytables/test_compat.py
++++ b/pandas/tests/io/pytables/test_compat.py
+@@ -1,9 +1,10 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+
+
+ @pytest.fixture
+--- a/pandas/tests/io/pytables/test_read.py
++++ b/pandas/tests/io/pytables/test_read.py
+@@ -401,7 +401,7 @@ def test_read_py2_hdf_file_in_py3(datapa
+
+ def test_read_infer_string(tmp_path, setup_path):
+ # GH#54431
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": ["a", "b", None]})
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="data", format="table")
+--- a/pandas/tests/io/pytables/test_round_trip.py
++++ b/pandas/tests/io/pytables/test_round_trip.py
+@@ -565,7 +565,7 @@ def test_round_trip_equals(tmp_path, set
+
+ def test_infer_string_columns(tmp_path, setup_path):
+ # GH#
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ path = tmp_path / setup_path
+ with pd.option_context("future.infer_string", True):
+ df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
+--- a/pandas/tests/io/pytables/test_store.py
++++ b/pandas/tests/io/pytables/test_store.py
+@@ -7,6 +7,7 @@ import time
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -37,7 +38,7 @@ is_crashing_arch=bool((platform.uname()[
+
+ pytestmark = pytest.mark.single_cpu
+
+-tables = pytest.importorskip("tables")
++tables = td.versioned_importorskip("tables")
+
+
+ def test_context(setup_path):
+--- a/pandas/tests/io/pytables/test_subclass.py
++++ b/pandas/tests/io/pytables/test_subclass.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -12,7 +13,7 @@ from pandas.io.pytables import (
+ read_hdf,
+ )
+
+-pytest.importorskip("tables")
++td.versioned_importorskip("tables")
+
+
+ class TestHDFStoreSubclass:
+--- a/pandas/tests/io/test_clipboard.py
++++ b/pandas/tests/io/test_clipboard.py
+@@ -3,6 +3,7 @@ from textwrap import dedent
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.errors import (
+ PyperclipException,
+ PyperclipWindowsException,
+@@ -353,14 +354,14 @@ class TestClipboard:
+ ):
+ # GH#50502
+ if string_storage == "pyarrow" or dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ if string_storage == "python":
+ string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
+ string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+
+ elif dtype_backend == "pyarrow" and engine != "c":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+--- a/pandas/tests/io/test_common.py
++++ b/pandas/tests/io/test_common.py
+@@ -100,7 +100,7 @@ bar2,12,13,14,15
+
+ def test_stringify_file_and_path_like(self):
+ # GH 38125: do not stringify file objects that are also path-like
+- fsspec = pytest.importorskip("fsspec")
++ fsspec = td.versioned_importorskip("fsspec")
+ with tm.ensure_clean() as path:
+ with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
+ assert fsspec_obj == icom.stringify_path(fsspec_obj)
+@@ -153,7 +153,7 @@ Look,a snake,🐍"""
+
+ # Test that pyarrow can handle a file opened with get_handle
+ def test_get_handle_pyarrow_compat(self):
+- pa_csv = pytest.importorskip("pyarrow.csv")
++ pa_csv = td.versioned_importorskip("pyarrow.csv")
+
+ # Test latin1, ucs-2, and ucs-4 chars
+ data = """a,b,c
+@@ -196,7 +196,7 @@ Look,a snake,🐍"""
+ ],
+ )
+ def test_read_non_existent(self, reader, module, error_class, fn_ext):
+- pytest.importorskip(module)
++ td.versioned_importorskip(module)
+
+ path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
+ msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
+@@ -234,7 +234,7 @@ Look,a snake,🐍"""
+ )
+ # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
+ def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
+- pytest.importorskip(module)
++ td.versioned_importorskip(module)
+
+ dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})
+
+@@ -264,7 +264,7 @@ Look,a snake,🐍"""
+ def test_read_expands_user_home_dir(
+ self, reader, module, error_class, fn_ext, monkeypatch
+ ):
+- pytest.importorskip(module)
++ td.versioned_importorskip(module)
+
+ path = os.path.join("~", "does_not_exist." + fn_ext)
+ monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
+@@ -321,7 +321,7 @@ Look,a snake,🐍"""
+ ],
+ )
+ def test_read_fspath_all(self, reader, module, path, datapath):
+- pytest.importorskip(module)
++ td.versioned_importorskip(module)
+ path = datapath(*path)
+
+ mypath = CustomFSPath(path)
+@@ -349,13 +349,13 @@ Look,a snake,🐍"""
+ )
+ def test_write_fspath_all(self, writer_name, writer_kwargs, module):
+ if writer_name in ["to_latex"]: # uses Styler implementation
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+ p1 = tm.ensure_clean("string")
+ p2 = tm.ensure_clean("fspath")
+ df = pd.DataFrame({"A": [1, 2]})
+
+ with p1 as string, p2 as fspath:
+- pytest.importorskip(module)
++ td.versioned_importorskip(module)
+ mypath = CustomFSPath(fspath)
+ writer = getattr(df, writer_name)
+
+@@ -377,7 +377,7 @@ Look,a snake,🐍"""
+ # Same test as write_fspath_all, except HDF5 files aren't
+ # necessarily byte-for-byte identical for a given dataframe, so we'll
+ # have to read and compare equality
+- pytest.importorskip("tables")
++ td.versioned_importorskip("tables")
+
+ df = pd.DataFrame({"A": [1, 2]})
+ p1 = tm.ensure_clean("string")
+--- a/pandas/tests/io/test_feather.py
++++ b/pandas/tests/io/test_feather.py
+@@ -2,6 +2,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.core.arrays import (
+@@ -15,7 +16,7 @@ pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+
+ @pytest.mark.single_cpu
+--- a/pandas/tests/io/test_fsspec.py
++++ b/pandas/tests/io/test_fsspec.py
+@@ -25,7 +25,7 @@ pytestmark = pytest.mark.filterwarnings(
+
+ @pytest.fixture
+ def fsspectest():
+- pytest.importorskip("fsspec")
++ td.versioned_importorskip("fsspec")
+ from fsspec import register_implementation
+ from fsspec.implementations.memory import MemoryFileSystem
+ from fsspec.registry import _registry as registry
+@@ -59,7 +59,7 @@ def df1():
+
+ @pytest.fixture
+ def cleared_fs():
+- fsspec = pytest.importorskip("fsspec")
++ fsspec = td.versioned_importorskip("fsspec")
+
+ memfs = fsspec.filesystem("memory")
+ yield memfs
+@@ -99,7 +99,7 @@ def test_to_csv(cleared_fs, df1):
+
+
+ def test_to_excel(cleared_fs, df1):
+- pytest.importorskip("openpyxl")
++ td.versioned_importorskip("openpyxl")
+ ext = "xlsx"
+ path = f"memory://test/test.{ext}"
+ df1.to_excel(path, index=True)
+@@ -111,7 +111,7 @@ def test_to_excel(cleared_fs, df1):
+
+ @pytest.mark.parametrize("binary_mode", [False, True])
+ def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1):
+- fsspec = pytest.importorskip("fsspec")
++ fsspec = td.versioned_importorskip("fsspec")
+
+ path = "memory://test/test.csv"
+ mode = "wb" if binary_mode else "w"
+@@ -153,7 +153,7 @@ def test_read_table_options(fsspectest):
+
+
+ def test_excel_options(fsspectest):
+- pytest.importorskip("openpyxl")
++ td.versioned_importorskip("openpyxl")
+ extension = "xlsx"
+
+ df = DataFrame({"a": [0]})
+@@ -168,7 +168,7 @@ def test_excel_options(fsspectest):
+
+ def test_to_parquet_new_file(cleared_fs, df1):
+ """Regression test for writing to a not-yet-existent GCS Parquet file."""
+- pytest.importorskip("fastparquet")
++ td.versioned_importorskip("fastparquet")
+
+ df1.to_parquet(
+ "memory://test/test.csv", index=True, engine="fastparquet", compression=None
+@@ -177,7 +177,7 @@ def test_to_parquet_new_file(cleared_fs,
+
+ def test_arrowparquet_options(fsspectest):
+ """Regression test for writing to a not-yet-existent GCS Parquet file."""
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [0]})
+ df.to_parquet(
+ "testmem://test/test.csv",
+@@ -197,7 +197,7 @@ def test_arrowparquet_options(fsspectest
+ @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
+ def test_fastparquet_options(fsspectest):
+ """Regression test for writing to a not-yet-existent GCS Parquet file."""
+- pytest.importorskip("fastparquet")
++ td.versioned_importorskip("fastparquet")
+
+ df = DataFrame({"a": [0]})
+ df.to_parquet(
+@@ -217,7 +217,7 @@ def test_fastparquet_options(fsspectest)
+
+ @pytest.mark.single_cpu
+ def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so):
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ tm.assert_equal(
+ read_csv(
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so
+@@ -242,7 +242,7 @@ def test_from_s3_csv(s3_public_bucket_wi
+ @pytest.mark.single_cpu
+ @pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
+ def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so):
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ tm.assert_equal(
+ read_csv(
+ f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv",
+@@ -255,8 +255,8 @@ def test_s3_protocols(s3_public_bucket_w
+ @pytest.mark.single_cpu
+ @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
+ def test_s3_parquet(s3_public_bucket, s3so, df1):
+- pytest.importorskip("fastparquet")
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("fastparquet")
++ td.versioned_importorskip("s3fs")
+
+ fn = f"s3://{s3_public_bucket.name}/test.parquet"
+ df1.to_parquet(
+@@ -274,7 +274,7 @@ def test_not_present_exception():
+
+
+ def test_feather_options(fsspectest):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [0]})
+ df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"})
+ assert fsspectest.test[0] == "feather_write"
+@@ -321,7 +321,7 @@ def test_stata_options(fsspectest):
+
+
+ def test_markdown_options(fsspectest):
+- pytest.importorskip("tabulate")
++ td.versioned_importorskip("tabulate")
+ df = DataFrame({"a": [0]})
+ df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"})
+ assert fsspectest.test[0] == "md_write"
+@@ -329,7 +329,7 @@ def test_markdown_options(fsspectest):
+
+
+ def test_non_fsspec_options():
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ with pytest.raises(ValueError, match="storage_options"):
+ read_csv("localfile", storage_options={"a": True})
+ with pytest.raises(ValueError, match="storage_options"):
+--- a/pandas/tests/io/test_gcs.py
++++ b/pandas/tests/io/test_gcs.py
+@@ -29,8 +29,8 @@ pytestmark = pytest.mark.filterwarnings(
+ @pytest.fixture
+ def gcs_buffer():
+ """Emulate GCS using a binary buffer."""
+- pytest.importorskip("gcsfs")
+- fsspec = pytest.importorskip("fsspec")
++ td.versioned_importorskip("gcsfs")
++ fsspec = td.versioned_importorskip("fsspec")
+
+ gcs_buffer = BytesIO()
+ gcs_buffer.close = lambda: True
+@@ -83,8 +83,8 @@ def test_to_read_gcs(gcs_buffer, format,
+ df1.to_json(path)
+ df2 = read_json(path, convert_dates=["dt"])
+ elif format == "parquet":
+- pytest.importorskip("pyarrow")
+- pa_fs = pytest.importorskip("pyarrow.fs")
++ td.versioned_importorskip("pyarrow")
++ pa_fs = td.versioned_importorskip("pyarrow.fs")
+
+ class MockFileSystem(pa_fs.FileSystem):
+ @staticmethod
+@@ -107,7 +107,7 @@ def test_to_read_gcs(gcs_buffer, format,
+ captured = capsys.readouterr()
+ assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n"
+ elif format == "markdown":
+- pytest.importorskip("tabulate")
++ td.versioned_importorskip("tabulate")
+ df1.to_markdown(path)
+ df2 = df1
+
+@@ -196,8 +196,8 @@ def test_to_csv_compression_encoding_gcs
+
+ def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
+ """Regression test for writing to a not-yet-existent GCS Parquet file."""
+- pytest.importorskip("fastparquet")
+- pytest.importorskip("gcsfs")
++ td.versioned_importorskip("fastparquet")
++ td.versioned_importorskip("gcsfs")
+
+ from fsspec import AbstractFileSystem
+
+--- a/pandas/tests/io/test_html.py
++++ b/pandas/tests/io/test_html.py
+@@ -71,8 +71,8 @@ def assert_framelist_equal(list1, list2,
+
+
+ def test_bs4_version_fails(monkeypatch, datapath):
+- bs4 = pytest.importorskip("bs4")
+- pytest.importorskip("html5lib")
++ bs4 = td.versioned_importorskip("bs4")
++ td.versioned_importorskip("html5lib")
+
+ monkeypatch.setattr(bs4, "__version__", "4.2")
+ with pytest.raises(ImportError, match="Pandas requires version"):
+@@ -89,9 +89,9 @@ def test_invalid_flavor():
+
+
+ def test_same_ordering(datapath):
+- pytest.importorskip("bs4")
+- pytest.importorskip("lxml")
+- pytest.importorskip("html5lib")
++ td.versioned_importorskip("bs4")
++ td.versioned_importorskip("lxml")
++ td.versioned_importorskip("html5lib")
+
+ filename = datapath("io", "data", "html", "valid_markup.html")
+ dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
+@@ -184,13 +184,13 @@ class TestReadHtml:
+ string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
+ string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+ else:
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+
+--- a/pandas/tests/io/test_http_headers.py
++++ b/pandas/tests/io/test_http_headers.py
+@@ -161,7 +161,7 @@ def test_to_parquet_to_disk_with_storage
+ "Auth": "other_custom",
+ }
+
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+
+ true_df = pd.DataFrame({"column_name": ["column_value"]})
+ msg = (
+--- a/pandas/tests/io/test_orc.py
++++ b/pandas/tests/io/test_orc.py
+@@ -8,12 +8,13 @@ import pathlib
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import read_orc
+ import pandas._testing as tm
+ from pandas.core.arrays import StringArray
+
+-pytest.importorskip("pyarrow.orc")
++td.versioned_importorskip("pyarrow.orc")
+
+ import pyarrow as pa
+
+@@ -248,7 +249,7 @@ def test_orc_reader_snappy_compressed(di
+ def test_orc_roundtrip_file(dirpath):
+ # GH44554
+ # PyArrow gained ORC write support with the current argument order
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ data = {
+ "boolean1": np.array([False, True], dtype="bool"),
+@@ -273,7 +274,7 @@ def test_orc_roundtrip_file(dirpath):
+ def test_orc_roundtrip_bytesio():
+ # GH44554
+ # PyArrow gained ORC write support with the current argument order
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ data = {
+ "boolean1": np.array([False, True], dtype="bool"),
+@@ -297,7 +298,7 @@ def test_orc_roundtrip_bytesio():
+ def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
+ # GH44554
+ # PyArrow gained ORC write support with the current argument order
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+
+ msg = "The dtype of one or more columns is not supported yet."
+ with pytest.raises(NotImplementedError, match=msg):
+@@ -305,7 +306,7 @@ def test_orc_writer_dtypes_not_supported
+
+
+ def test_orc_dtype_backend_pyarrow():
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "string": list("abc"),
+@@ -341,7 +342,7 @@ def test_orc_dtype_backend_pyarrow():
+
+ def test_orc_dtype_backend_numpy_nullable():
+ # GH#50503
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "string": list("abc"),
+--- a/pandas/tests/io/test_parquet.py
++++ b/pandas/tests/io/test_parquet.py
+@@ -8,6 +8,7 @@ import pathlib
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_copy_on_write
+ from pandas._config.config import _get_option
+
+@@ -389,7 +390,7 @@ class Base:
+ @pytest.mark.single_cpu
+ def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine):
+ if engine != "auto":
+- pytest.importorskip(engine)
++ td.versioned_importorskip(engine)
+ with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f:
+ httpserver.serve_content(content=f.read())
+ df = read_parquet(httpserver.url)
+@@ -611,7 +612,7 @@ class TestBasic(Base):
+ check_round_trip(df, engine)
+
+ def test_dtype_backend(self, engine, request):
+- pq = pytest.importorskip("pyarrow.parquet")
++ pq = td.versioned_importorskip("pyarrow.parquet")
+
+ if engine == "fastparquet":
+ # We are manually disabling fastparquet's
+@@ -799,7 +800,7 @@ class TestParquetPyArrow(Base):
+
+ @pytest.mark.single_cpu
+ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so):
+- s3fs = pytest.importorskip("s3fs")
++ s3fs = td.versioned_importorskip("s3fs")
+ s3 = s3fs.S3FileSystem(**s3so)
+ kw = {"filesystem": s3}
+ check_round_trip(
+@@ -833,7 +834,7 @@ class TestParquetPyArrow(Base):
+ def test_s3_roundtrip_for_dir(
+ self, df_compat, s3_public_bucket, pa, partition_col, s3so
+ ):
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ # GH #26388
+ expected_df = df_compat.copy()
+
+@@ -862,14 +863,14 @@ class TestParquetPyArrow(Base):
+ )
+
+ def test_read_file_like_obj_support(self, df_compat):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ buffer = BytesIO()
+ df_compat.to_parquet(buffer)
+ df_from_buf = read_parquet(buffer)
+ tm.assert_frame_equal(df_compat, df_from_buf)
+
+ def test_expand_user(self, df_compat, monkeypatch):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ monkeypatch.setenv("HOME", "TestingUser")
+ monkeypatch.setenv("USERPROFILE", "TestingUser")
+ with pytest.raises(OSError, match=r".*TestingUser.*"):
+@@ -924,7 +925,7 @@ class TestParquetPyArrow(Base):
+ def test_additional_extension_arrays(self, pa):
+ # test additional ExtensionArrays that are supported through the
+ # __arrow_array__ protocol
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "a": pd.Series([1, 2, 3], dtype="Int64"),
+@@ -939,7 +940,7 @@ class TestParquetPyArrow(Base):
+
+ def test_pyarrow_backed_string_array(self, pa, string_storage):
+ # test ArrowStringArray supported through the __arrow_array__ protocol
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
+ with pd.option_context("string_storage", string_storage):
+ check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))
+@@ -947,7 +948,7 @@ class TestParquetPyArrow(Base):
+ def test_additional_extension_types(self, pa):
+ # test additional ExtensionArrays that are supported through the
+ # __arrow_array__ protocol + by defining a custom ExtensionType
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(
+ {
+ "c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]),
+@@ -992,7 +993,7 @@ class TestParquetPyArrow(Base):
+
+ def test_filter_row_groups(self, pa):
+ # https://github.com/pandas-dev/pandas/issues/26551
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame({"a": list(range(3))})
+ with tm.ensure_clean() as path:
+ df.to_parquet(path, engine=pa)
+@@ -1349,7 +1350,7 @@ class TestParquetFastParquet(Base):
+ tm.assert_frame_equal(result, df)
+
+ def test_filesystem_notimplemented(self):
+- pytest.importorskip("fastparquet")
++ td.versioned_importorskip("fastparquet")
+ df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+ with tm.ensure_clean() as path:
+ with pytest.raises(
+@@ -1365,7 +1366,7 @@ class TestParquetFastParquet(Base):
+ read_parquet(path, engine="fastparquet", filesystem="foo")
+
+ def test_invalid_filesystem(self):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+ with tm.ensure_clean() as path:
+ with pytest.raises(
+@@ -1381,7 +1382,7 @@ class TestParquetFastParquet(Base):
+ read_parquet(path, engine="pyarrow", filesystem="foo")
+
+ def test_unsupported_pa_filesystem_storage_options(self):
+- pa_fs = pytest.importorskip("pyarrow.fs")
++ pa_fs = td.versioned_importorskip("pyarrow.fs")
+ df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
+ with tm.ensure_clean() as path:
+ with pytest.raises(
+--- a/pandas/tests/io/test_pickle.py
++++ b/pandas/tests/io/test_pickle.py
+@@ -499,7 +499,7 @@ def test_pickle_generalurl_read(monkeypa
+
+
+ def test_pickle_fsspec_roundtrip():
+- pytest.importorskip("fsspec")
++ td.versioned_importorskip("fsspec")
+ with tm.ensure_clean():
+ mockurl = "memory://mockfile"
+ df = DataFrame(
+--- a/pandas/tests/io/test_s3.py
++++ b/pandas/tests/io/test_s3.py
+@@ -2,13 +2,14 @@ from io import BytesIO
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import read_csv
+
+
+ def test_streaming_s3_objects():
+ # GH17135
+ # botocore gained iteration support in 1.10.47, can now be used in read_*
+- pytest.importorskip("botocore", minversion="1.10.47")
++ td.versioned_importorskip("botocore", min_version="1.10.47")
+ from botocore.response import StreamingBody
+
+ data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"]
+@@ -20,7 +21,7 @@ def test_streaming_s3_objects():
+ @pytest.mark.single_cpu
+ def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
+ # GH 34626
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ result = read_csv(
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+ nrows=3,
+@@ -33,7 +34,7 @@ def test_read_without_creds_from_pub_buc
+ def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
+ # Ensure we can read from a public bucket with credentials
+ # GH 34626
+- pytest.importorskip("s3fs")
++ td.versioned_importorskip("s3fs")
+ df = read_csv(
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+ nrows=5,
+--- a/pandas/tests/io/test_spss.py
++++ b/pandas/tests/io/test_spss.py
+@@ -4,11 +4,12 @@ from pathlib import Path
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+ from pandas.util.version import Version
+
+-pyreadstat = pytest.importorskip("pyreadstat")
++pyreadstat = td.versioned_importorskip("pyreadstat")
+
+
+ # TODO(CoW) - detection of chained assignment in cython
+@@ -101,7 +102,7 @@ def test_spss_umlauts_dtype_backend(data
+ expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64")
+
+ if dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.arrays import ArrowExtensionArray
+
+--- a/pandas/tests/io/test_sql.py
++++ b/pandas/tests/io/test_sql.py
+@@ -601,8 +601,8 @@ def drop_view(
+
+ @pytest.fixture
+ def mysql_pymysql_engine():
+- sqlalchemy = pytest.importorskip("sqlalchemy")
+- pymysql = pytest.importorskip("pymysql")
++ sqlalchemy = td.versioned_importorskip("sqlalchemy")
++ pymysql = td.versioned_importorskip("pymysql")
+ engine = sqlalchemy.create_engine(
+ "mysql+pymysql://root@localhost:3306/pandas",
+ connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS},
+@@ -649,8 +649,8 @@ def mysql_pymysql_conn_types(mysql_pymys
+
+ @pytest.fixture
+ def postgresql_psycopg2_engine():
+- sqlalchemy = pytest.importorskip("sqlalchemy")
+- pytest.importorskip("psycopg2")
++ sqlalchemy = td.versioned_importorskip("sqlalchemy")
++ td.versioned_importorskip("psycopg2")
+ engine = sqlalchemy.create_engine(
+ "postgresql+psycopg2://postgres:postgres@localhost:5432/pandas",
+ poolclass=sqlalchemy.pool.NullPool,
+@@ -684,7 +684,7 @@ def postgresql_psycopg2_conn(postgresql_
+
+ @pytest.fixture
+ def postgresql_adbc_conn():
+- pytest.importorskip("adbc_driver_postgresql")
++ td.versioned_importorskip("adbc_driver_postgresql")
+ from adbc_driver_postgresql import dbapi
+
+ uri = "postgresql://postgres:postgres@localhost:5432/pandas"
+@@ -747,14 +747,14 @@ def postgresql_psycopg2_conn_types(postg
+
+ @pytest.fixture
+ def sqlite_str():
+- pytest.importorskip("sqlalchemy")
++ td.versioned_importorskip("sqlalchemy")
+ with tm.ensure_clean() as name:
+ yield f"sqlite:///{name}"
+
+
+ @pytest.fixture
+ def sqlite_engine(sqlite_str):
+- sqlalchemy = pytest.importorskip("sqlalchemy")
++ sqlalchemy = td.versioned_importorskip("sqlalchemy")
+ engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool)
+ yield engine
+ for view in get_all_views(engine):
+@@ -772,7 +772,7 @@ def sqlite_conn(sqlite_engine):
+
+ @pytest.fixture
+ def sqlite_str_iris(sqlite_str, iris_path):
+- sqlalchemy = pytest.importorskip("sqlalchemy")
++ sqlalchemy = td.versioned_importorskip("sqlalchemy")
+ engine = sqlalchemy.create_engine(sqlite_str)
+ create_and_load_iris(engine, iris_path)
+ create_and_load_iris_view(engine)
+@@ -795,7 +795,7 @@ def sqlite_conn_iris(sqlite_engine_iris)
+
+ @pytest.fixture
+ def sqlite_str_types(sqlite_str, types_data):
+- sqlalchemy = pytest.importorskip("sqlalchemy")
++ sqlalchemy = td.versioned_importorskip("sqlalchemy")
+ engine = sqlalchemy.create_engine(sqlite_str)
+ create_and_load_types(engine, types_data, "sqlite")
+ engine.dispose()
+@@ -816,7 +816,7 @@ def sqlite_conn_types(sqlite_engine_type
+
+ @pytest.fixture
+ def sqlite_adbc_conn():
+- pytest.importorskip("adbc_driver_sqlite")
++ td.versioned_importorskip("adbc_driver_sqlite")
+ from adbc_driver_sqlite import dbapi
+
+ with tm.ensure_clean() as name:
+@@ -1001,7 +1001,7 @@ def test_dataframe_to_sql_empty(conn, te
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_dataframe_to_sql_arrow_dtypes(conn, request):
+ # GH 52046
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "int": pd.array([1], dtype="int8[pyarrow]"),
+@@ -1035,7 +1035,7 @@ def test_dataframe_to_sql_arrow_dtypes(c
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture):
+ # GH 52046
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "datetime": pd.array(
+@@ -2515,7 +2515,7 @@ def test_sqlalchemy_integer_overload_map
+
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_database_uri_string(conn, request, test_frame1):
+- pytest.importorskip("sqlalchemy")
++ td.versioned_importorskip("sqlalchemy")
+ conn = request.getfixturevalue(conn)
+ # Test read_sql and .to_sql method with a database URI (GH10654)
+ # db_uri = 'sqlite:///:memory:' # raises
+@@ -2537,7 +2537,7 @@ def test_database_uri_string(conn, reque
+ @td.skip_if_installed("pg8000")
+ @pytest.mark.parametrize("conn", all_connectable)
+ def test_pg8000_sqlalchemy_passthrough_error(conn, request):
+- pytest.importorskip("sqlalchemy")
++ td.versioned_importorskip("sqlalchemy")
+ conn = request.getfixturevalue(conn)
+ # using driver that will not be installed on CI to trigger error
+ # in sqlalchemy.create_engine -> test passing of this error to user
+@@ -3414,7 +3414,7 @@ def test_to_sql_with_negative_npinf(conn
+ # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
+ # for pymysql version >= 0.10
+ # TODO(GH#36465): remove this version check after GH 36465 is fixed
+- pymysql = pytest.importorskip("pymysql")
++ pymysql = td.versioned_importorskip("pymysql")
+
+ if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns:
+ mark = pytest.mark.xfail(reason="GH 36465")
+@@ -3529,7 +3529,7 @@ def test_options_auto(conn, request, tes
+
+
+ def test_options_get_engine():
+- pytest.importorskip("sqlalchemy")
++ td.versioned_importorskip("sqlalchemy")
+ assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine)
+
+ with pd.option_context("io.sql.engine", "sqlalchemy"):
+@@ -3681,14 +3681,14 @@ def dtype_backend_expected():
+ string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
+
+ elif dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) # type: ignore[assignment]
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) # type: ignore[assignment]
+
+ else:
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+
+@@ -3705,7 +3705,7 @@ def dtype_backend_expected():
+ }
+ )
+ if dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.arrays import ArrowExtensionArray
+
+@@ -3850,7 +3850,7 @@ def test_row_object_is_named_tuple(sqlit
+ def test_read_sql_string_inference(sqlite_engine):
+ conn = sqlite_engine
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ table = "test"
+ df = DataFrame({"a": ["x", "y"]})
+ df.to_sql(table, con=conn, index=False, if_exists="replace")
+--- a/pandas/tests/io/test_stata.py
++++ b/pandas/tests/io/test_stata.py
+@@ -2045,11 +2045,11 @@ def test_compression(compression, versio
+ with bz2.open(path, "rb") as comp:
+ fp = io.BytesIO(comp.read())
+ elif compression == "zstd":
+- zstd = pytest.importorskip("zstandard")
++ zstd = td.versioned_importorskip("zstandard")
+ with zstd.open(path, "rb") as comp:
+ fp = io.BytesIO(comp.read())
+ elif compression == "xz":
+- lzma = pytest.importorskip("lzma")
++ lzma = td.versioned_importorskip("lzma")
+ with lzma.open(path, "rb") as comp:
+ fp = io.BytesIO(comp.read())
+ elif compression is None:
+--- a/pandas/tests/io/xml/test_to_xml.py
++++ b/pandas/tests/io/xml/test_to_xml.py
+@@ -867,7 +867,7 @@ def test_encoding_option_str(xml_baby_na
+
+
+ def test_correct_encoding_file(xml_baby_names):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
+
+ with tm.ensure_clean("test.xml") as path:
+@@ -876,7 +876,7 @@ def test_correct_encoding_file(xml_baby_
+
+ @pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
+ def test_wrong_encoding_option_lxml(xml_baby_names, parser, encoding):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
+
+ with tm.ensure_clean("test.xml") as path:
+@@ -892,7 +892,7 @@ def test_misspelled_encoding(parser, geo
+
+
+ def test_xml_declaration_pretty_print(geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ expected = """\
+ <data>
+ <row>
+@@ -1005,7 +1005,7 @@ xsl_expected = """\
+
+
+ def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with open(
+ xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+ ) as f:
+@@ -1015,7 +1015,7 @@ def test_stylesheet_file_like(xsl_row_fi
+ def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
+ # note: By default the bodies of untyped functions are not checked,
+ # consider using --check-untyped-defs
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
+
+ with open(
+@@ -1032,7 +1032,7 @@ def test_stylesheet_io(xsl_row_field_out
+
+
+ def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with open(
+ xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+ ) as f:
+@@ -1044,7 +1044,7 @@ def test_stylesheet_buffered_reader(xsl_
+
+
+ def test_stylesheet_wrong_path(geom_df):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = os.path.join("data", "xml", "row_field_output.xslt")
+
+@@ -1057,7 +1057,7 @@ def test_stylesheet_wrong_path(geom_df):
+
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_string_stylesheet(val, geom_df):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ msg = "|".join(
+ [
+@@ -1073,7 +1073,7 @@ def test_empty_string_stylesheet(val, ge
+
+
+ def test_incorrect_xsl_syntax(geom_df):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1103,7 +1103,7 @@ def test_incorrect_xsl_syntax(geom_df):
+
+
+ def test_incorrect_xsl_eval(geom_df):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1131,7 +1131,7 @@ def test_incorrect_xsl_eval(geom_df):
+
+
+ def test_incorrect_xsl_apply(geom_df):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1169,7 +1169,7 @@ def test_stylesheet_with_etree(geom_df):
+
+
+ def test_style_to_csv(geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output method="text" indent="yes" />
+@@ -1198,7 +1198,7 @@ def test_style_to_csv(geom_df):
+
+
+ def test_style_to_string(geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output method="text" indent="yes" />
+@@ -1232,7 +1232,7 @@ def test_style_to_string(geom_df):
+
+
+ def test_style_to_json(geom_df):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output method="text" indent="yes" />
+@@ -1363,8 +1363,8 @@ def test_unsuported_compression(parser,
+
+ @pytest.mark.single_cpu
+ def test_s3_permission_output(parser, s3_public_bucket, geom_df):
+- s3fs = pytest.importorskip("s3fs")
+- pytest.importorskip("lxml")
++ s3fs = td.versioned_importorskip("s3fs")
++ td.versioned_importorskip("lxml")
+
+ with tm.external_error_raised((PermissionError, FileNotFoundError)):
+ fs = s3fs.S3FileSystem(anon=True)
+--- a/pandas/tests/io/xml/test_xml.py
++++ b/pandas/tests/io/xml/test_xml.py
+@@ -249,7 +249,7 @@ df_kml = DataFrame(
+
+ def test_literal_xml_deprecation():
+ # GH 53809
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+@@ -289,7 +289,7 @@ def read_xml_iterparse_comp(comp_path, c
+
+
+ def test_parser_consistency_file(xml_books):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_file_lxml = read_xml(xml_books, parser="lxml")
+ df_file_etree = read_xml(xml_books, parser="etree")
+
+@@ -462,7 +462,7 @@ def test_file_handle_close(xml_books, pa
+
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_string_lxml(val):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ msg = "|".join(
+ [
+@@ -505,7 +505,7 @@ def test_wrong_file_path(parser, datapat
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ def test_url(httpserver, xml_file):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with open(xml_file, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+ df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]")
+@@ -587,7 +587,7 @@ def test_whitespace(parser):
+
+
+ def test_empty_xpath_lxml(xml_books):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with pytest.raises(ValueError, match=("xpath does not return any nodes")):
+ read_xml(xml_books, xpath=".//python", parser="lxml")
+
+@@ -600,7 +600,7 @@ def test_bad_xpath_etree(xml_books):
+
+
+ def test_bad_xpath_lxml(xml_books):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ with pytest.raises(lxml_etree.XPathEvalError, match=("Invalid expression")):
+ read_xml(xml_books, xpath=".//[book]", parser="lxml")
+@@ -659,7 +659,7 @@ def test_prefix_namespace(parser):
+
+
+ def test_consistency_default_namespace():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_lxml = read_xml(
+ StringIO(xml_default_nmsp),
+ xpath=".//ns:row",
+@@ -678,7 +678,7 @@ def test_consistency_default_namespace()
+
+
+ def test_consistency_prefix_namespace():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_lxml = read_xml(
+ StringIO(xml_prefix_nmsp),
+ xpath=".//doc:row",
+@@ -710,7 +710,7 @@ def test_missing_prefix_definition_etree
+
+
+ def test_missing_prefix_definition_lxml(kml_cta_rail_lines):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ with pytest.raises(lxml_etree.XPathEvalError, match=("Undefined namespace prefix")):
+ read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml")
+@@ -718,7 +718,7 @@ def test_missing_prefix_definition_lxml(
+
+ @pytest.mark.parametrize("key", ["", None])
+ def test_none_namespace_prefix(key):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with pytest.raises(
+ TypeError, match=("empty namespace prefix is not supported in XPath")
+ ):
+@@ -831,7 +831,7 @@ def test_empty_elems_only(parser):
+
+
+ def test_attribute_centric_xml():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xml = """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <TrainSchedule>
+@@ -1061,7 +1061,7 @@ def test_ascii_encoding(xml_baby_names,
+
+
+ def test_parser_consistency_with_encoding(xml_baby_names):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_xpath_lxml = read_xml(xml_baby_names, parser="lxml", encoding="ISO-8859-1")
+ df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1")
+
+@@ -1084,7 +1084,7 @@ def test_parser_consistency_with_encodin
+
+
+ def test_wrong_encoding_for_lxml():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ # GH#45133
+ data = """<data>
+ <row>
+@@ -1131,7 +1131,7 @@ def test_wrong_parser(xml_books):
+
+
+ def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ df_style = read_xml(
+ kml_cta_rail_lines,
+ xpath=".//k:Placemark",
+@@ -1158,7 +1158,7 @@ def test_stylesheet_file(kml_cta_rail_li
+
+
+ def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+ df_style = read_xml(
+ kml_cta_rail_lines,
+@@ -1173,7 +1173,7 @@ def test_stylesheet_file_like(kml_cta_ra
+ def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode):
+ # note: By default the bodies of untyped functions are not checked,
+ # consider using --check-untyped-defs
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
+
+ with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+@@ -1193,7 +1193,7 @@ def test_stylesheet_io(kml_cta_rail_line
+
+
+ def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+ xsl_obj = f.read()
+
+@@ -1208,7 +1208,7 @@ def test_stylesheet_buffered_reader(kml_
+
+
+ def test_style_charset():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xml = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
+
+ xsl = """\
+@@ -1237,7 +1237,7 @@ def test_style_charset():
+
+
+ def test_not_stylesheet(kml_cta_rail_lines, xml_books):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ with pytest.raises(
+ lxml_etree.XSLTParseError, match=("document is not a stylesheet")
+@@ -1246,7 +1246,7 @@ def test_not_stylesheet(kml_cta_rail_lin
+
+
+ def test_incorrect_xsl_syntax(kml_cta_rail_lines):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+@@ -1275,7 +1275,7 @@ def test_incorrect_xsl_syntax(kml_cta_ra
+
+
+ def test_incorrect_xsl_eval(kml_cta_rail_lines):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+@@ -1302,7 +1302,7 @@ def test_incorrect_xsl_eval(kml_cta_rail
+
+
+ def test_incorrect_xsl_apply(kml_cta_rail_lines):
+- lxml_etree = pytest.importorskip("lxml.etree")
++ lxml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = """\
+ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+@@ -1321,7 +1321,7 @@ def test_incorrect_xsl_apply(kml_cta_rai
+
+
+ def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path):
+- xml_etree = pytest.importorskip("lxml.etree")
++ xml_etree = td.versioned_importorskip("lxml.etree")
+
+ xsl = xml_data_path / "flatten.xsl"
+
+@@ -1335,7 +1335,7 @@ def test_wrong_stylesheet(kml_cta_rail_l
+ def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode):
+ # note: By default the bodies of untyped functions are not checked,
+ # consider using --check-untyped-defs
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
+
+ with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
+@@ -1350,7 +1350,7 @@ def test_stylesheet_file_close(kml_cta_r
+
+
+ def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ with pytest.raises(
+ ValueError, match=("To use stylesheet, you need lxml installed")
+ ):
+@@ -1359,7 +1359,7 @@ def test_stylesheet_with_etree(kml_cta_r
+
+ @pytest.mark.parametrize("val", ["", b""])
+ def test_empty_stylesheet(val, datapath):
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+@@ -1662,7 +1662,7 @@ def test_empty_data(xml_books, parser):
+
+
+ def test_online_stylesheet():
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("lxml")
+ xml = """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <catalog>
+@@ -1993,8 +1993,8 @@ def test_unsuported_compression(parser):
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ def test_s3_parser_consistency(s3_public_bucket_with_data, s3so):
+- pytest.importorskip("s3fs")
+- pytest.importorskip("lxml")
++ td.versioned_importorskip("s3fs")
++ td.versioned_importorskip("lxml")
+ s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml"
+
+ df_lxml = read_xml(s3, parser="lxml", storage_options=s3so)
+@@ -2035,7 +2035,7 @@ def test_read_xml_nullable_dtypes(
+ </data>"""
+
+ if using_infer_string:
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"]))
+ string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None]))
+
+@@ -2044,14 +2044,14 @@ def test_read_xml_nullable_dtypes(
+ string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
+
+ elif dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["x", "y"]))
+ string_array_na = ArrowExtensionArray(pa.array(["x", None]))
+
+ else:
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ string_array = ArrowStringArray(pa.array(["x", "y"]))
+ string_array_na = ArrowStringArray(pa.array(["x", None]))
+
+@@ -2073,7 +2073,7 @@ def test_read_xml_nullable_dtypes(
+ )
+
+ if dtype_backend == "pyarrow":
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = DataFrame(
+--- a/pandas/tests/plotting/conftest.py
++++ b/pandas/tests/plotting/conftest.py
+@@ -3,6 +3,7 @@ import gc
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ to_datetime,
+@@ -15,9 +16,9 @@ def mpl_cleanup():
+ # 1) Resets units registry
+ # 2) Resets rc_context
+ # 3) Closes all figures
+- mpl = pytest.importorskip("matplotlib")
+- mpl_units = pytest.importorskip("matplotlib.units")
+- plt = pytest.importorskip("matplotlib.pyplot")
++ mpl = td.versioned_importorskip("matplotlib")
++ mpl_units = td.versioned_importorskip("matplotlib.units")
++ plt = td.versioned_importorskip("matplotlib.pyplot")
+ orig_units_registry = mpl_units.registry.copy()
+ with mpl.rc_context():
+ mpl.use("template")
+--- a/pandas/tests/plotting/frame/test_frame.py
++++ b/pandas/tests/plotting/frame/test_frame.py
+@@ -48,8 +48,8 @@ from pandas.util.version import Version
+
+ from pandas.io.formats.printing import pprint_thing
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+
+
+ class TestDataFramePlots:
+@@ -1119,7 +1119,7 @@ class TestDataFramePlots:
+ _check_box_return_type(result, return_type)
+
+ def test_kde_df(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
+ ax = _check_plot_works(df.plot, kind="kde")
+ expected = [pprint_thing(c) for c in df.columns]
+@@ -1127,13 +1127,13 @@ class TestDataFramePlots:
+ _check_ticks_props(ax, xrot=0)
+
+ def test_kde_df_rot(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ ax = df.plot(kind="kde", rot=20, fontsize=5)
+ _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
+
+ def test_kde_df_subplots(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ axes = _check_plot_works(
+ df.plot,
+@@ -1144,13 +1144,13 @@ class TestDataFramePlots:
+ _check_axes_shape(axes, axes_num=4, layout=(4, 1))
+
+ def test_kde_df_logy(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ axes = df.plot(kind="kde", logy=True, subplots=True)
+ _check_ax_scales(axes, yaxis="log")
+
+ def test_kde_missing_vals(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4)))
+ df.loc[0, 0] = np.nan
+ _check_plot_works(df.plot, kind="kde")
+@@ -1447,14 +1447,14 @@ class TestDataFramePlots:
+
+ @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+ def test_kind_both_ways(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"x": [1, 2, 3]})
+ df.plot(kind=kind)
+ getattr(df.plot, kind)()
+
+ @pytest.mark.parametrize("kind", ["scatter", "hexbin"])
+ def test_kind_both_ways_x_y(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"x": [1, 2, 3]})
+ df.plot("x", "x", kind=kind)
+ getattr(df.plot, kind)("x", "x")
+@@ -2100,7 +2100,7 @@ class TestDataFramePlots:
+ @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
+ def test_memory_leak(self, kind):
+ """Check that every plot type gets properly collected."""
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ args = {}
+ if kind in ["hexbin", "scatter", "pie"]:
+ df = DataFrame(
+@@ -2427,7 +2427,7 @@ class TestDataFramePlots:
+ "kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie")
+ )
+ def test_group_subplot(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ d = {
+ "a": np.arange(10),
+ "b": np.arange(10) + 1,
+--- a/pandas/tests/plotting/frame/test_frame_color.py
++++ b/pandas/tests/plotting/frame/test_frame_color.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import DataFrame
+ import pandas._testing as tm
+@@ -14,9 +15,9 @@ from pandas.tests.plotting.common import
+ )
+ from pandas.util.version import Version
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
+-cm = pytest.importorskip("matplotlib.cm")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
++cm = td.versioned_importorskip("matplotlib.cm")
+
+
+ def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
+@@ -446,7 +447,7 @@ class TestDataFrameColor:
+ _check_colors(ax.patches[::10], facecolors=["green"] * 5)
+
+ def test_kde_colors(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ custom_colors = "rgcby"
+ df = DataFrame(np.random.default_rng(2).random((5, 5)))
+
+@@ -455,14 +456,14 @@ class TestDataFrameColor:
+
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
+ def test_kde_colors_cmap(self, colormap):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ ax = df.plot.kde(colormap=colormap)
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ _check_colors(ax.get_lines(), linecolors=rgba_colors)
+
+ def test_kde_colors_and_styles_subplots(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
+
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+@@ -473,14 +474,14 @@ class TestDataFrameColor:
+
+ @pytest.mark.parametrize("colormap", ["k", "red"])
+ def test_kde_colors_and_styles_subplots_single_col_str(self, colormap):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ axes = df.plot(kind="kde", color=colormap, subplots=True)
+ for ax in axes:
+ _check_colors(ax.get_lines(), linecolors=[colormap])
+
+ def test_kde_colors_and_styles_subplots_custom_color(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ custom_colors = "rgcby"
+ axes = df.plot(kind="kde", color=custom_colors, subplots=True)
+@@ -489,7 +490,7 @@ class TestDataFrameColor:
+
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
+ def test_kde_colors_and_styles_subplots_cmap(self, colormap):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ axes = df.plot(kind="kde", colormap=colormap, subplots=True)
+@@ -497,7 +498,7 @@ class TestDataFrameColor:
+ _check_colors(ax.get_lines(), linecolors=[c])
+
+ def test_kde_colors_and_styles_subplots_single_col(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ # make color a list if plotting one column frame
+ # handles cases like df.plot(color='DodgerBlue')
+@@ -505,7 +506,7 @@ class TestDataFrameColor:
+ _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+
+ def test_kde_colors_and_styles_subplots_single_char(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ # list of styles
+ # single character style
+@@ -514,7 +515,7 @@ class TestDataFrameColor:
+ _check_colors(ax.get_lines(), linecolors=["r"])
+
+ def test_kde_colors_and_styles_subplots_list(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
+ # list of styles
+ styles = list("rgcby")
+--- a/pandas/tests/plotting/frame/test_frame_groupby.py
++++ b/pandas/tests/plotting/frame/test_frame_groupby.py
+@@ -2,10 +2,11 @@
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ from pandas.tests.plotting.common import _check_visible
+
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+
+
+ class TestDataFramePlotsGroupby:
+--- a/pandas/tests/plotting/frame/test_frame_legend.py
++++ b/pandas/tests/plotting/frame/test_frame_legend.py
+@@ -14,7 +14,7 @@ from pandas.tests.plotting.common import
+ )
+ from pandas.util.version import Version
+
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+
+
+ class TestFrameLegend:
+@@ -61,7 +61,7 @@ class TestFrameLegend:
+
+ @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"])
+ def test_df_legend_labels(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
+ df2 = DataFrame(
+ np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
+@@ -87,7 +87,7 @@ class TestFrameLegend:
+ _check_legend_labels(ax, labels=expected)
+
+ def test_df_legend_labels_secondary_y(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
+ df2 = DataFrame(
+ np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
+@@ -105,7 +105,7 @@ class TestFrameLegend:
+
+ def test_df_legend_labels_time_series(self):
+ # Time Series
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ind = date_range("1/1/2014", periods=3)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((3, 3)),
+@@ -131,7 +131,7 @@ class TestFrameLegend:
+
+ def test_df_legend_labels_time_series_scatter(self):
+ # Time Series
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ind = date_range("1/1/2014", periods=3)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((3, 3)),
+@@ -157,7 +157,7 @@ class TestFrameLegend:
+ _check_legend_labels(ax, labels=["data1", "data3"])
+
+ def test_df_legend_labels_time_series_no_mutate(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ind = date_range("1/1/2014", periods=3)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((3, 3)),
+--- a/pandas/tests/plotting/frame/test_frame_subplots.py
++++ b/pandas/tests/plotting/frame/test_frame_subplots.py
+@@ -6,6 +6,7 @@ import numpy as np
+ from numpy.testing import assert_array_almost_equal_nulp
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import is_platform_linux
+ from pandas.compat.numpy import np_version_gte1p24
+
+@@ -27,8 +28,8 @@ from pandas.tests.plotting.common import
+
+ from pandas.io.formats.printing import pprint_thing
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+
+
+ class TestDataFramePlotsSubplots:
+--- a/pandas/tests/plotting/frame/test_hist_box_by.py
++++ b/pandas/tests/plotting/frame/test_hist_box_by.py
+@@ -3,6 +3,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ import pandas._testing as tm
+ from pandas.tests.plotting.common import (
+@@ -12,7 +13,7 @@ from pandas.tests.plotting.common import
+ get_y_axis,
+ )
+
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+
+
+ @pytest.fixture
+--- a/pandas/tests/plotting/test_boxplot_method.py
++++ b/pandas/tests/plotting/test_boxplot_method.py
+@@ -6,6 +6,7 @@ import string
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ MultiIndex,
+@@ -25,8 +26,8 @@ from pandas.tests.plotting.common import
+
+ from pandas.io.formats.printing import pprint_thing
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+
+
+ def _check_ax_limits(col, ax):
+--- a/pandas/tests/plotting/test_common.py
++++ b/pandas/tests/plotting/test_common.py
+@@ -1,5 +1,6 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import DataFrame
+ from pandas.tests.plotting.common import (
+ _check_plot_works,
+@@ -7,7 +8,7 @@ from pandas.tests.plotting.common import
+ _gen_two_subplots,
+ )
+
+-plt = pytest.importorskip("matplotlib.pyplot")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+
+
+ class TestCommon:
+--- a/pandas/tests/plotting/test_converter.py
++++ b/pandas/tests/plotting/test_converter.py
+@@ -8,6 +8,7 @@ import sys
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas._config.config as cf
+
+ from pandas._libs.tslibs import to_offset
+@@ -41,8 +42,8 @@ except ImportError:
+ # causing an improper skip
+ pass
+
+-pytest.importorskip("matplotlib.pyplot")
+-dates = pytest.importorskip("matplotlib.dates")
++td.versioned_importorskip("matplotlib.pyplot")
++dates = td.versioned_importorskip("matplotlib.dates")
+
+
+ @pytest.mark.single_cpu
+@@ -79,7 +80,7 @@ class TestRegistration:
+ assert subprocess.check_call(call) == 0
+
+ def test_registering_no_warning(self):
+- plt = pytest.importorskip("matplotlib.pyplot")
++ plt = td.versioned_importorskip("matplotlib.pyplot")
+ s = Series(range(12), index=date_range("2017", periods=12))
+ _, ax = plt.subplots()
+
+@@ -89,7 +90,7 @@ class TestRegistration:
+ plt.close()
+
+ def test_pandas_plots_register(self):
+- plt = pytest.importorskip("matplotlib.pyplot")
++ plt = td.versioned_importorskip("matplotlib.pyplot")
+ s = Series(range(12), index=date_range("2017", periods=12))
+ # Set to the "warn" state, in case this isn't the first test run
+ with tm.assert_produces_warning(None) as w:
+@@ -101,7 +102,7 @@ class TestRegistration:
+ plt.close()
+
+ def test_matplotlib_formatters(self):
+- units = pytest.importorskip("matplotlib.units")
++ units = td.versioned_importorskip("matplotlib.units")
+
+ # Can't make any assertion about the start state.
+ # We we check that toggling converters off removes it, and toggling it
+@@ -113,9 +114,9 @@ class TestRegistration:
+ assert Timestamp in units.registry
+
+ def test_option_no_warning(self):
+- pytest.importorskip("matplotlib.pyplot")
++ td.versioned_importorskip("matplotlib.pyplot")
+ ctx = cf.option_context("plotting.matplotlib.register_converters", False)
+- plt = pytest.importorskip("matplotlib.pyplot")
++ plt = td.versioned_importorskip("matplotlib.pyplot")
+ s = Series(range(12), index=date_range("2017", periods=12))
+ _, ax = plt.subplots()
+
+@@ -130,8 +131,8 @@ class TestRegistration:
+ plt.close()
+
+ def test_registry_resets(self):
+- units = pytest.importorskip("matplotlib.units")
+- dates = pytest.importorskip("matplotlib.dates")
++ units = td.versioned_importorskip("matplotlib.units")
++ dates = td.versioned_importorskip("matplotlib.dates")
+
+ # make a copy, to reset to
+ original = dict(units.registry)
+--- a/pandas/tests/plotting/test_datetimelike.py
++++ b/pandas/tests/plotting/test_datetimelike.py
+@@ -10,6 +10,7 @@ import pickle
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs.tslibs import (
+ BaseOffset,
+ to_offset,
+@@ -41,7 +42,7 @@ from pandas.tests.plotting.common import
+
+ from pandas.tseries.offsets import WeekOfMonth
+
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+
+
+ class TestTSPlot:
+@@ -737,7 +738,7 @@ class TestTSPlot:
+ assert ax.get_yaxis().get_visible()
+
+ def test_secondary_kde(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series(np.random.default_rng(2).standard_normal(10))
+ fig, ax = mpl.pyplot.subplots()
+ ax = ser.plot(secondary_y=True, kind="density", ax=ax)
+--- a/pandas/tests/plotting/test_groupby.py
++++ b/pandas/tests/plotting/test_groupby.py
+@@ -4,6 +4,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Index,
+@@ -14,7 +15,7 @@ from pandas.tests.plotting.common import
+ _check_legend_labels,
+ )
+
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+
+
+ class TestDataFrameGroupByPlots:
+--- a/pandas/tests/plotting/test_hist_method.py
++++ b/pandas/tests/plotting/test_hist_method.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Index,
+@@ -25,7 +26,7 @@ from pandas.tests.plotting.common import
+ get_y_axis,
+ )
+
+-mpl = pytest.importorskip("matplotlib")
++mpl = td.versioned_importorskip("matplotlib")
+
+
+ @pytest.fixture
+@@ -206,7 +207,7 @@ class TestSeriesPlots:
+
+ @pytest.mark.xfail(reason="Api changed in 3.6.0")
+ def test_hist_kde(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _, ax = mpl.pyplot.subplots()
+ ax = ts.plot.hist(logy=True, ax=ax)
+ _check_ax_scales(ax, yaxis="log")
+@@ -217,16 +218,16 @@ class TestSeriesPlots:
+ _check_text_labels(ylabels, [""] * len(ylabels))
+
+ def test_hist_kde_plot_works(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _check_plot_works(ts.plot.kde)
+
+ def test_hist_kde_density_works(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _check_plot_works(ts.plot.density)
+
+ @pytest.mark.xfail(reason="Api changed in 3.6.0")
+ def test_hist_kde_logy(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _, ax = mpl.pyplot.subplots()
+ ax = ts.plot.kde(logy=True, ax=ax)
+ _check_ax_scales(ax, yaxis="log")
+@@ -236,7 +237,7 @@ class TestSeriesPlots:
+ _check_text_labels(ylabels, [""] * len(ylabels))
+
+ def test_hist_kde_color_bins(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _, ax = mpl.pyplot.subplots()
+ ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
+ _check_ax_scales(ax, yaxis="log")
+@@ -244,7 +245,7 @@ class TestSeriesPlots:
+ _check_colors(ax.patches, facecolors=["b"] * 10)
+
+ def test_hist_kde_color(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _, ax = mpl.pyplot.subplots()
+ ax = ts.plot.kde(logy=True, color="r", ax=ax)
+ _check_ax_scales(ax, yaxis="log")
+@@ -631,7 +632,7 @@ class TestDataFramePlots:
+
+ def test_hist_with_nans_and_weights(self):
+ # GH 48884
+- mpl_patches = pytest.importorskip("matplotlib.patches")
++ mpl_patches = td.versioned_importorskip("matplotlib.patches")
+ df = DataFrame(
+ [[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]],
+ columns=list("abc"),
+--- a/pandas/tests/plotting/test_misc.py
++++ b/pandas/tests/plotting/test_misc.py
+@@ -26,9 +26,9 @@ from pandas.tests.plotting.common import
+ _check_ticks_props,
+ )
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
+-cm = pytest.importorskip("matplotlib.cm")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
++cm = td.versioned_importorskip("matplotlib.cm")
+
+
+ @pytest.fixture
+@@ -148,7 +148,7 @@ class TestSeriesPlots:
+ class TestDataFramePlots:
+ @pytest.mark.parametrize("pass_axis", [False, True])
+ def test_scatter_matrix_axis(self, pass_axis):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ scatter_matrix = plotting.scatter_matrix
+
+ ax = None
+@@ -173,7 +173,7 @@ class TestDataFramePlots:
+
+ @pytest.mark.parametrize("pass_axis", [False, True])
+ def test_scatter_matrix_axis_smaller(self, pass_axis):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ scatter_matrix = plotting.scatter_matrix
+
+ ax = None
+--- a/pandas/tests/plotting/test_series.py
++++ b/pandas/tests/plotting/test_series.py
+@@ -32,8 +32,8 @@ from pandas.tests.plotting.common import
+ get_y_axis,
+ )
+
+-mpl = pytest.importorskip("matplotlib")
+-plt = pytest.importorskip("matplotlib.pyplot")
++mpl = td.versioned_importorskip("matplotlib")
++plt = td.versioned_importorskip("matplotlib.pyplot")
+
+
+ @pytest.fixture
+@@ -569,16 +569,16 @@ class TestSeriesPlots:
+ ],
+ )
+ def test_kde_kwargs(self, ts, bw_method, ind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind)
+
+ def test_density_kwargs(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ sample_points = np.linspace(-100, 100, 20)
+ _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
+
+ def test_kde_kwargs_check_axes(self, ts):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _, ax = mpl.pyplot.subplots()
+ sample_points = np.linspace(-100, 100, 20)
+ ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
+@@ -586,7 +586,7 @@ class TestSeriesPlots:
+ _check_text_labels(ax.yaxis.get_label(), "Density")
+
+ def test_kde_missing_vals(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(np.random.default_rng(2).uniform(size=50))
+ s[0] = np.nan
+ axes = _check_plot_works(s.plot.kde)
+@@ -609,7 +609,7 @@ class TestSeriesPlots:
+ plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+ )
+ def test_kind_kwarg(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(range(3))
+ _, ax = mpl.pyplot.subplots()
+ s.plot(kind=kind, ax=ax)
+@@ -620,7 +620,7 @@ class TestSeriesPlots:
+ plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+ )
+ def test_kind_attr(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(range(3))
+ _, ax = mpl.pyplot.subplots()
+ getattr(s.plot, kind)()
+@@ -636,7 +636,7 @@ class TestSeriesPlots:
+
+ @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+ def test_valid_object_plot(self, kind):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(range(10), dtype=object)
+ _check_plot_works(s.plot, kind=kind)
+
+@@ -750,7 +750,7 @@ class TestSeriesPlots:
+ @pytest.mark.slow
+ def test_series_grid_settings(self):
+ # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ _check_grid_settings(
+ Series([1, 2, 3]),
+ plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
+--- a/pandas/tests/plotting/test_style.py
++++ b/pandas/tests/plotting/test_style.py
+@@ -1,8 +1,9 @@
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import Series
+
+-pytest.importorskip("matplotlib")
++td.versioned_importorskip("matplotlib")
+ from pandas.plotting._matplotlib.style import get_standard_colors
+
+
+--- a/pandas/tests/reductions/test_reductions.py
++++ b/pandas/tests/reductions/test_reductions.py
+@@ -7,6 +7,7 @@ from decimal import Decimal
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ Categorical,
+@@ -1091,7 +1092,7 @@ class TestSeriesReductions:
+
+ def test_any_all_pyarrow_string(self):
+ # GH#54591
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series(["", "a"], dtype="string[pyarrow_numpy]")
+ assert ser.any()
+ assert not ser.all()
+--- a/pandas/tests/reductions/test_stat_reductions.py
++++ b/pandas/tests/reductions/test_stat_reductions.py
+@@ -6,6 +6,7 @@ import inspect
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -231,7 +232,7 @@ class TestSeriesStatReductions:
+ assert pd.isna(result)
+
+ def test_skew(self):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ string_series = Series(range(20), dtype=np.float64, name="series")
+
+@@ -253,7 +254,7 @@ class TestSeriesStatReductions:
+ assert (df.skew() == 0).all()
+
+ def test_kurt(self):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ string_series = Series(range(20), dtype=np.float64, name="series")
+
+--- a/pandas/tests/resample/test_datetime_index.py
++++ b/pandas/tests/resample/test_datetime_index.py
+@@ -1110,7 +1110,7 @@ def test_resample_dtype_preservation(uni
+
+
+ def test_resample_dtype_coercion(unit):
+- pytest.importorskip("scipy.interpolate")
++ td.versioned_importorskip("scipy.interpolate")
+
+ # GH 16361
+ df = {"a": [1, 3, 1, 4]}
+--- a/pandas/tests/reshape/merge/test_merge.py
++++ b/pandas/tests/reshape/merge/test_merge.py
+@@ -8,6 +8,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.common import (
+ is_object_dtype,
+ is_string_dtype,
+@@ -2817,7 +2818,7 @@ def test_merge_ea_and_non_ea(any_numeric
+ @pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"])
+ def test_merge_arrow_and_numpy_dtypes(dtype):
+ # GH#52406
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame({"a": [1, 2]}, dtype=dtype)
+ df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]")
+ result = df.merge(df2)
+@@ -2967,7 +2968,7 @@ def test_merge_ea_int_and_float_numpy():
+
+ def test_merge_arrow_string_index(any_string_dtype):
+ # GH#54894
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype)
+ right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype))
+ result = left.merge(right, left_on="a", right_index=True, how="left")
+--- a/pandas/tests/reshape/test_melt.py
++++ b/pandas/tests/reshape/test_melt.py
+@@ -3,6 +3,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -1224,7 +1225,7 @@ class TestWideToLong:
+
+ def test_wide_to_long_pyarrow_string_columns():
+ # GH 57066
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "ID": {0: 1},
+--- a/pandas/tests/series/accessors/test_list_accessor.py
++++ b/pandas/tests/series/accessors/test_list_accessor.py
+@@ -2,13 +2,14 @@ import re
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ ArrowDtype,
+ Series,
+ )
+ import pandas._testing as tm
+
+-pa = pytest.importorskip("pyarrow")
++pa = td.versioned_importorskip("pyarrow")
+
+ from pandas.compat import pa_version_under11p0
+
+--- a/pandas/tests/series/accessors/test_struct_accessor.py
++++ b/pandas/tests/series/accessors/test_struct_accessor.py
+@@ -2,6 +2,7 @@ import re
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat.pyarrow import (
+ pa_version_under11p0,
+ pa_version_under13p0,
+@@ -15,8 +16,8 @@ from pandas import (
+ )
+ import pandas._testing as tm
+
+-pa = pytest.importorskip("pyarrow")
+-pc = pytest.importorskip("pyarrow.compute")
++pa = td.versioned_importorskip("pyarrow")
++pc = td.versioned_importorskip("pyarrow.compute")
+
+
+ def test_struct_accessor_dtypes():
+--- a/pandas/tests/series/methods/test_convert_dtypes.py
++++ b/pandas/tests/series/methods/test_convert_dtypes.py
+@@ -3,6 +3,7 @@ from itertools import product
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs import lib
+
+ import pandas as pd
+@@ -291,7 +292,7 @@ class TestSeriesConvertDtypes:
+
+ def test_convert_dtypes_pyarrow_to_np_nullable(self):
+ # GH 53648
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = pd.Series(range(2), dtype="int32[pyarrow]")
+ result = ser.convert_dtypes(dtype_backend="numpy_nullable")
+ expected = pd.Series(range(2), dtype="Int32")
+@@ -299,7 +300,7 @@ class TestSeriesConvertDtypes:
+
+ def test_convert_dtypes_pyarrow_null(self):
+ # GH#55346
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ ser = pd.Series([None, None])
+ result = ser.convert_dtypes(dtype_backend="pyarrow")
+ expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null()))
+--- a/pandas/tests/series/methods/test_cov_corr.py
++++ b/pandas/tests/series/methods/test_cov_corr.py
+@@ -3,6 +3,7 @@ import math
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ Series,
+@@ -58,7 +59,7 @@ class TestSeriesCov:
+ class TestSeriesCorr:
+ @pytest.mark.parametrize("dtype", ["float64", "Float64"])
+ def test_corr(self, datetime_series, dtype):
+- stats = pytest.importorskip("scipy.stats")
++ stats = td.versioned_importorskip("scipy.stats")
+
+ datetime_series = datetime_series.astype(dtype)
+
+@@ -93,7 +94,7 @@ class TestSeriesCorr:
+ tm.assert_almost_equal(result, expected)
+
+ def test_corr_rank(self):
+- stats = pytest.importorskip("scipy.stats")
++ stats = td.versioned_importorskip("scipy.stats")
+
+ # kendall and spearman
+ A = Series(
+--- a/pandas/tests/series/methods/test_drop_duplicates.py
++++ b/pandas/tests/series/methods/test_drop_duplicates.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ Categorical,
+@@ -252,7 +253,7 @@ class TestSeriesDropDuplicates:
+ tm.assert_series_equal(result, expected)
+
+ def test_duplicated_arrow_dtype(self):
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([True, False, None, False], dtype="bool[pyarrow]")
+ result = ser.drop_duplicates()
+ expected = Series([True, False, None], dtype="bool[pyarrow]")
+@@ -260,7 +261,7 @@ class TestSeriesDropDuplicates:
+
+ def test_drop_duplicates_arrow_strings(self):
+ # GH#54904
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ ser = Series(["a", "a"], dtype=pd.ArrowDtype(pa.string()))
+ result = ser.drop_duplicates()
+ expecetd = Series(["a"], dtype=pd.ArrowDtype(pa.string()))
+--- a/pandas/tests/series/methods/test_explode.py
++++ b/pandas/tests/series/methods/test_explode.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ import pandas._testing as tm
+
+@@ -146,7 +147,7 @@ def test_explode_scalars_can_ignore_inde
+ @pytest.mark.parametrize("ignore_index", [True, False])
+ def test_explode_pyarrow_list_type(ignore_index):
+ # GH 53602
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ data = [
+ [None, None],
+@@ -167,7 +168,7 @@ def test_explode_pyarrow_list_type(ignor
+
+ @pytest.mark.parametrize("ignore_index", [True, False])
+ def test_explode_pyarrow_non_list_type(ignore_index):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ data = [1, 2, 3]
+ ser = pd.Series(data, dtype=pd.ArrowDtype(pa.int64()))
+ result = ser.explode(ignore_index=ignore_index)
+--- a/pandas/tests/series/methods/test_interpolate.py
++++ b/pandas/tests/series/methods/test_interpolate.py
+@@ -118,7 +118,7 @@ class TestSeriesInterpolateData:
+ non_ts.interpolate(method="time")
+
+ def test_interpolate_cubicspline(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series([10, 11, 12, 13])
+
+ expected = Series(
+@@ -133,7 +133,7 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(result, expected)
+
+ def test_interpolate_pchip(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
+
+ # interpolate at new_index
+@@ -145,7 +145,7 @@ class TestSeriesInterpolateData:
+ interp_s.loc[49:51]
+
+ def test_interpolate_akima(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series([10, 11, 12, 13])
+
+ # interpolate at new_index where `der` is zero
+@@ -171,7 +171,7 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(interp_s.loc[1:3], expected)
+
+ def test_interpolate_piecewise_polynomial(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series([10, 11, 12, 13])
+
+ expected = Series(
+@@ -186,7 +186,7 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(interp_s.loc[1:3], expected)
+
+ def test_interpolate_from_derivatives(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series([10, 11, 12, 13])
+
+ expected = Series(
+@@ -276,14 +276,14 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(result, expected)
+
+ def test_interp_quad(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
+ result = sq.interpolate(method="quadratic")
+ expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
+ tm.assert_series_equal(result, expected)
+
+ def test_interp_scipy_basic(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, 3, np.nan, 12, np.nan, 25])
+ # slinear
+ expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
+@@ -618,7 +618,7 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(result, expected)
+
+ def test_interp_all_good(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, 2, 3])
+ result = s.interpolate(method="polynomial", order=1)
+ tm.assert_series_equal(result, s)
+@@ -645,7 +645,7 @@ class TestSeriesInterpolateData:
+ s.interpolate(method="polynomial", order=1)
+
+ def test_interp_nonmono_raise(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, np.nan, 3], index=[0, 2, 1])
+ msg = "krogh interpolation requires that the index be monotonic"
+ with pytest.raises(ValueError, match=msg):
+@@ -653,7 +653,7 @@ class TestSeriesInterpolateData:
+
+ @pytest.mark.parametrize("method", ["nearest", "pad"])
+ def test_interp_datetime64(self, method, tz_naive_fixture):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = Series(
+ [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
+ )
+@@ -699,7 +699,7 @@ class TestSeriesInterpolateData:
+ @pytest.mark.parametrize("method", ["polynomial", "spline"])
+ def test_no_order(self, method):
+ # see GH-10633, GH-24014
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([0, 1, np.nan, 3])
+ msg = "You must specify the order of the spline or polynomial"
+ with pytest.raises(ValueError, match=msg):
+@@ -707,21 +707,21 @@ class TestSeriesInterpolateData:
+
+ @pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
+ def test_interpolate_spline_invalid_order(self, order):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([0, 1, np.nan, 3])
+ msg = "order needs to be specified and greater than 0"
+ with pytest.raises(ValueError, match=msg):
+ s.interpolate(method="spline", order=order)
+
+ def test_spline(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
+ result = s.interpolate(method="spline", order=1)
+ expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
+ tm.assert_series_equal(result, expected)
+
+ def test_spline_extrapolate(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
+ result3 = s.interpolate(method="spline", order=1, ext=3)
+ expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
+@@ -732,7 +732,7 @@ class TestSeriesInterpolateData:
+ tm.assert_series_equal(result1, expected1)
+
+ def test_spline_smooth(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
+ assert (
+ s.interpolate(method="spline", order=3, s=0)[5]
+@@ -741,7 +741,7 @@ class TestSeriesInterpolateData:
+
+ def test_spline_interpolation(self):
+ # Explicit cast to float to avoid implicit cast when setting np.nan
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(np.arange(10) ** 2, dtype="float")
+ s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
+ result1 = s.interpolate(method="spline", order=1)
+@@ -801,7 +801,7 @@ class TestSeriesInterpolateData:
+
+ method, kwargs = interp_methods_ind
+ if method == "pchip":
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+
+ if method == "linear":
+ result = df[0].interpolate(**kwargs)
+@@ -824,7 +824,7 @@ class TestSeriesInterpolateData:
+ are tested here.
+ """
+ # gh 21662
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ind = pd.timedelta_range(start=1, periods=4)
+ df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
+
+@@ -861,7 +861,7 @@ class TestSeriesInterpolateData:
+
+ def test_interpolate_fill_value(self):
+ # GH#54920
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])
+ result = ser.interpolate(method="nearest", fill_value=0)
+ expected = Series([np.nan, 0, 1, 1, 3, 0])
+--- a/pandas/tests/series/methods/test_rank.py
++++ b/pandas/tests/series/methods/test_rank.py
+@@ -56,7 +56,7 @@ def dtype(request):
+
+ class TestSeriesRank:
+ def test_rank(self, datetime_series):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ datetime_series[::2] = np.nan
+ datetime_series[:10:3] = 4.0
+@@ -269,7 +269,7 @@ class TestSeriesRank:
+ def test_rank_tie_methods_on_infs_nans(
+ self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf
+ ):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ if dtype == "float64[pyarrow]":
+ if method == "average":
+ exp_dtype = "float64[pyarrow]"
+@@ -318,7 +318,7 @@ class TestSeriesRank:
+ ],
+ )
+ def test_rank_methods_series(self, method, op, value):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ xs = np.random.default_rng(2).standard_normal(9)
+ xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
+--- a/pandas/tests/series/methods/test_reset_index.py
++++ b/pandas/tests/series/methods/test_reset_index.py
+@@ -3,6 +3,7 @@ from datetime import datetime
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -170,7 +171,7 @@ class TestResetIndex:
+
+ def test_reset_index_drop_infer_string(self):
+ # GH#56160
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series(["a", "b", "c"], dtype=object)
+ with option_context("future.infer_string", True):
+ result = ser.reset_index(drop=True)
+--- a/pandas/tests/series/test_api.py
++++ b/pandas/tests/series/test_api.py
+@@ -4,6 +4,7 @@ import pydoc
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import (
+ DataFrame,
+@@ -169,7 +170,7 @@ class TestSeriesMisc:
+
+ def test_inspect_getmembers(self):
+ # GH38782
+- pytest.importorskip("jinja2")
++ td.versioned_importorskip("jinja2")
+ ser = Series(dtype=object)
+ msg = "Series._data is deprecated"
+ with tm.assert_produces_warning(
+--- a/pandas/tests/series/test_constructors.py
++++ b/pandas/tests/series/test_constructors.py
+@@ -2094,7 +2094,7 @@ class TestSeriesConstructors:
+
+ def test_series_string_inference(self):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ expected = Series(["a", "b"], dtype=dtype)
+ with pd.option_context("future.infer_string", True):
+@@ -2109,7 +2109,7 @@ class TestSeriesConstructors:
+ @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA])
+ def test_series_string_with_na_inference(self, na_value):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+ expected = Series(["a", na_value], dtype=dtype)
+ with pd.option_context("future.infer_string", True):
+@@ -2118,7 +2118,7 @@ class TestSeriesConstructors:
+
+ def test_series_string_inference_scalar(self):
+ # GH#54430
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = Series("a", index=[1], dtype="string[pyarrow_numpy]")
+ with pd.option_context("future.infer_string", True):
+ ser = Series("a", index=[1])
+@@ -2126,7 +2126,7 @@ class TestSeriesConstructors:
+
+ def test_series_string_inference_array_string_dtype(self):
+ # GH#54496
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+ with pd.option_context("future.infer_string", True):
+ ser = Series(np.array(["a", "b"]))
+@@ -2134,7 +2134,7 @@ class TestSeriesConstructors:
+
+ def test_series_string_inference_storage_definition(self):
+ # GH#54793
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+ with pd.option_context("future.infer_string", True):
+ result = Series(["a", "b"], dtype="string")
+@@ -2150,7 +2150,7 @@ class TestSeriesConstructors:
+
+ def test_series_string_inference_na_first(self):
+ # GH#55655
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = Series([pd.NA, "b"], dtype="string[pyarrow_numpy]")
+ with pd.option_context("future.infer_string", True):
+ result = Series([pd.NA, "b"])
+--- a/pandas/tests/series/test_formats.py
++++ b/pandas/tests/series/test_formats.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._config import using_pyarrow_string_dtype
+
+ import pandas as pd
+@@ -227,7 +228,7 @@ class TestSeriesRepr:
+ repr(ts2).splitlines()[-1]
+
+ def test_latex_repr(self):
+- pytest.importorskip("jinja2") # uses Styler implementation
++ td.versioned_importorskip("jinja2") # uses Styler implementation
+ result = r"""\begin{tabular}{ll}
+ \toprule
+ & 0 \\
+--- a/pandas/tests/series/test_logical_ops.py
++++ b/pandas/tests/series/test_logical_ops.py
+@@ -4,6 +4,7 @@ import operator
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Index,
+@@ -533,7 +534,7 @@ class TestSeriesLogicalOps:
+
+ def test_pyarrow_numpy_string_invalid(self):
+ # GH#56008
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([False, True])
+ ser2 = Series(["a", "b"], dtype="string[pyarrow_numpy]")
+ result = ser == ser2
+--- a/pandas/tests/series/test_reductions.py
++++ b/pandas/tests/series/test_reductions.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ import pandas as pd
+ from pandas import Series
+ import pandas._testing as tm
+@@ -53,7 +54,7 @@ def test_mode_nullable_dtype(any_numeric
+
+ def test_mode_infer_string():
+ # GH#56183
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series(["a", "b"], dtype=object)
+ with pd.option_context("future.infer_string", True):
+ result = ser.mode()
+--- a/pandas/tests/strings/test_extract.py
++++ b/pandas/tests/strings/test_extract.py
+@@ -4,6 +4,7 @@ import re
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.core.dtypes.dtypes import ArrowDtype
+
+ from pandas import (
+@@ -718,7 +719,7 @@ def test_extractall_same_as_extract_subj
+ def test_extractall_preserves_dtype():
+ # Ensure that when extractall is called on a series with specific dtypes set, that
+ # the dtype is preserved in the resulting DataFrame's column.
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)")
+ assert result.dtypes[0] == "string[pyarrow]"
+--- a/pandas/tests/test_algos.py
++++ b/pandas/tests/test_algos.py
+@@ -4,6 +4,7 @@ import struct
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas._libs import (
+ algos as libalgos,
+ hashtable as ht,
+@@ -1789,7 +1790,7 @@ class TestRank:
+ ],
+ )
+ def test_scipy_compat(self, arr):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ arr = np.array(arr)
+
+--- a/pandas/tests/test_downstream.py
++++ b/pandas/tests/test_downstream.py
+@@ -44,8 +44,8 @@ def test_dask(df):
+ olduse = pd.get_option("compute.use_numexpr")
+
+ try:
+- pytest.importorskip("toolz")
+- dd = pytest.importorskip("dask.dataframe")
++ td.versioned_importorskip("toolz")
++ dd = td.versioned_importorskip("dask.dataframe")
+
+ ddf = dd.from_pandas(df, npartitions=3)
+ assert ddf.A is not None
+@@ -61,8 +61,8 @@ def test_dask_ufunc():
+ olduse = pd.get_option("compute.use_numexpr")
+
+ try:
+- da = pytest.importorskip("dask.array")
+- dd = pytest.importorskip("dask.dataframe")
++ da = td.versioned_importorskip("dask.array")
++ dd = td.versioned_importorskip("dask.dataframe")
+
+ s = Series([1.5, 2.3, 3.7, 4.0])
+ ds = dd.from_pandas(s, npartitions=2)
+@@ -78,7 +78,7 @@ def test_dask_ufunc():
+ def test_construct_dask_float_array_int_dtype_match_ndarray():
+ # GH#40110 make sure we treat a float-dtype dask array with the same
+ # rules we would for an ndarray
+- dd = pytest.importorskip("dask.dataframe")
++ dd = td.versioned_importorskip("dask.dataframe")
+
+ arr = np.array([1, 2.5, 3])
+ darr = dd.from_array(arr)
+@@ -102,15 +102,15 @@ def test_construct_dask_float_array_int_
+
+
+ def test_xarray(df):
+- pytest.importorskip("xarray")
++ td.versioned_importorskip("xarray")
+
+ assert df.to_xarray() is not None
+
+
+ def test_xarray_cftimeindex_nearest():
+ # https://github.com/pydata/xarray/issues/3751
+- cftime = pytest.importorskip("cftime")
+- xarray = pytest.importorskip("xarray")
++ cftime = td.versioned_importorskip("cftime")
++ xarray = td.versioned_importorskip("xarray")
+
+ times = xarray.cftime_range("0001", periods=2)
+ key = cftime.DatetimeGregorian(2000, 1, 1)
+@@ -142,7 +142,7 @@ def test_oo_optimized_datetime_index_unp
+
+
+ def test_statsmodels():
+- smf = pytest.importorskip("statsmodels.formula.api")
++ smf = td.versioned_importorskip("statsmodels.formula.api")
+
+ df = DataFrame(
+ {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)}
+@@ -151,7 +151,7 @@ def test_statsmodels():
+
+
+ def test_scikit_learn():
+- pytest.importorskip("sklearn")
++ td.versioned_importorskip("sklearn")
+ from sklearn import (
+ datasets,
+ svm,
+@@ -164,7 +164,7 @@ def test_scikit_learn():
+
+
+ def test_seaborn():
+- seaborn = pytest.importorskip("seaborn")
++ seaborn = td.versioned_importorskip("seaborn")
+ tips = DataFrame(
+ {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)}
+ )
+@@ -172,12 +172,12 @@ def test_seaborn():
+
+
+ def test_pandas_datareader():
+- pytest.importorskip("pandas_datareader")
++ td.versioned_importorskip("pandas_datareader")
+
+
+ @pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
+ def test_pyarrow(df):
+- pyarrow = pytest.importorskip("pyarrow")
++ pyarrow = td.versioned_importorskip("pyarrow")
+ table = pyarrow.Table.from_pandas(df)
+ result = table.to_pandas()
+ tm.assert_frame_equal(result, df)
+@@ -185,7 +185,7 @@ def test_pyarrow(df):
+
+ def test_yaml_dump(df):
+ # GH#42748
+- yaml = pytest.importorskip("yaml")
++ yaml = td.versioned_importorskip("yaml")
+
+ dumped = yaml.dump(df)
+
+@@ -247,7 +247,7 @@ def test_frame_setitem_dask_array_into_n
+ olduse = pd.get_option("compute.use_numexpr")
+
+ try:
+- da = pytest.importorskip("dask.array")
++ da = td.versioned_importorskip("dask.array")
+
+ dda = da.array([1, 2])
+ df = DataFrame({"a": ["a", "b"]})
+@@ -348,7 +348,7 @@ def test_dataframe_consortium() -> None:
+ Full testing is done at https://github.com/data-apis/dataframe-api-compat,
+ this is just to check that the entry point works as expected.
+ """
+- pytest.importorskip("dataframe_api_compat")
++ td.versioned_importorskip("dataframe_api_compat")
+ df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ df = df_pd.__dataframe_consortium_standard__()
+ result_1 = df.get_column_names()
+@@ -362,7 +362,7 @@ def test_dataframe_consortium() -> None:
+
+ def test_xarray_coerce_unit():
+ # GH44053
+- xr = pytest.importorskip("xarray")
++ xr = td.versioned_importorskip("xarray")
+
+ arr = xr.DataArray([1, 2, 3])
+ result = pd.to_datetime(arr, unit="ns")
+--- a/pandas/tests/test_nanops.py
++++ b/pandas/tests/test_nanops.py
+@@ -500,7 +500,7 @@ class TestnanopsDataFrame:
+
+ @pytest.mark.parametrize("ddof", range(3))
+ def test_nansem(self, ddof, skipna):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ with np.errstate(invalid="ignore"):
+ self.check_funs(
+@@ -559,7 +559,7 @@ class TestnanopsDataFrame:
+ return result
+
+ def test_nanskew(self, skipna):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
+ with np.errstate(invalid="ignore"):
+@@ -573,7 +573,7 @@ class TestnanopsDataFrame:
+ )
+
+ def test_nankurt(self, skipna):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ func1 = partial(sp_stats.kurtosis, fisher=True)
+ func = partial(self._skew_kurt_wrap, func=func1)
+@@ -704,7 +704,7 @@ class TestnanopsDataFrame:
+ self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
+
+ def test_nancorr_kendall(self):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
+ targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+@@ -714,7 +714,7 @@ class TestnanopsDataFrame:
+ self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
+
+ def test_nancorr_spearman(self):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
+ targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
+@@ -724,7 +724,7 @@ class TestnanopsDataFrame:
+ self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
+
+ def test_invalid_method(self):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
+ targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
+ msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
+--- a/pandas/tests/test_optional_dependency.py
++++ b/pandas/tests/test_optional_dependency.py
+@@ -3,6 +3,7 @@ import types
+
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat._optional import (
+ VERSIONS,
+ import_optional_dependency,
+@@ -23,7 +24,7 @@ def test_import_optional():
+
+
+ def test_xlrd_version_fallback():
+- pytest.importorskip("xlrd")
++ td.versioned_importorskip("xlrd")
+ import_optional_dependency("xlrd")
+
+
+--- a/pandas/tests/tools/test_to_datetime.py
++++ b/pandas/tests/tools/test_to_datetime.py
+@@ -1002,7 +1002,7 @@ class TestToDatetime:
+ @pytest.mark.parametrize("utc", [True, False])
+ @pytest.mark.parametrize("tz", [None, "US/Central"])
+ def test_to_datetime_arrow(self, tz, utc, arg_class):
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+
+ dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
+ dti = arg_class(dti)
+@@ -1357,7 +1357,7 @@ class TestToDatetime:
+
+ def test_to_datetime_tz_psycopg2(self, request, cache):
+ # xref 8260
+- psycopg2_tz = pytest.importorskip("psycopg2.tz")
++ psycopg2_tz = td.versioned_importorskip("psycopg2.tz")
+
+ # misc cases
+ tz1 = psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None)
+@@ -3742,7 +3742,7 @@ def test_ignoring_unknown_tz_deprecated(
+
+ def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
+ # GH 52425
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
+ result = to_datetime(ser)
+ expected = Series([1, 2], dtype="datetime64[ns]")
+--- a/pandas/tests/tools/test_to_numeric.py
++++ b/pandas/tests/tools/test_to_numeric.py
+@@ -867,7 +867,7 @@ def test_to_numeric_dtype_backend(val, d
+ def test_to_numeric_dtype_backend_na(val, dtype):
+ # GH#50505
+ if "pyarrow" in dtype:
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype_backend = "pyarrow"
+ else:
+ dtype_backend = "numpy_nullable"
+@@ -891,7 +891,7 @@ def test_to_numeric_dtype_backend_na(val
+ def test_to_numeric_dtype_backend_downcasting(val, dtype, downcast):
+ # GH#50505
+ if "pyarrow" in dtype:
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ dtype_backend = "pyarrow"
+ else:
+ dtype_backend = "numpy_nullable"
+@@ -908,7 +908,7 @@ def test_to_numeric_dtype_backend_downca
+ def test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend):
+ # GH#50505
+ if dtype_backend == "pyarrow":
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([1, pd.NA], dtype="UInt64")
+ result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned")
+ expected = Series([1, pd.NA], dtype=smaller)
+@@ -931,7 +931,7 @@ def test_to_numeric_dtype_backend_downca
+ def test_to_numeric_dtype_backend_already_nullable(dtype):
+ # GH#50505
+ if "pyarrow" in dtype:
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([1, pd.NA], dtype=dtype)
+ result = to_numeric(ser, dtype_backend="numpy_nullable")
+ expected = Series([1, pd.NA], dtype=dtype)
+@@ -971,7 +971,7 @@ def test_invalid_dtype_backend():
+
+ def test_coerce_pyarrow_backend():
+ # GH 52588
+- pa = pytest.importorskip("pyarrow")
++ pa = td.versioned_importorskip("pyarrow")
+ ser = Series(list("12x"), dtype=ArrowDtype(pa.string()))
+ result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow")
+ expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64()))
+--- a/pandas/tests/tools/test_to_timedelta.py
++++ b/pandas/tests/tools/test_to_timedelta.py
+@@ -6,6 +6,7 @@ from datetime import (
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas.compat import IS64
+ from pandas.errors import OutOfBoundsTimedelta
+
+@@ -324,7 +325,7 @@ class TestTimedeltas:
+
+ def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
+ # GH 52425
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
+ result = to_timedelta(ser)
+ expected = Series([1, 2], dtype="timedelta64[ns]")
+@@ -334,7 +335,7 @@ def test_from_numeric_arrow_dtype(any_nu
+ @pytest.mark.parametrize("unit", ["ns", "ms"])
+ def test_from_timedelta_arrow_dtype(unit):
+ # GH 54298
+- pytest.importorskip("pyarrow")
++ td.versioned_importorskip("pyarrow")
+ expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]")
+ result = to_timedelta(expected)
+ tm.assert_series_equal(result, expected)
+--- a/pandas/tests/window/test_online.py
++++ b/pandas/tests/window/test_online.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -9,7 +10,7 @@ import pandas._testing as tm
+
+ pytestmark = pytest.mark.single_cpu
+
+-pytest.importorskip("numba")
++td.versioned_importorskip("numba")
+
+
+ @pytest.mark.filterwarnings("ignore")
+--- a/pandas/tests/window/test_rolling_skew_kurt.py
++++ b/pandas/tests/window/test_rolling_skew_kurt.py
+@@ -3,6 +3,7 @@ from functools import partial
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -17,7 +18,7 @@ from pandas.tseries import offsets
+
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_series(series, sp_func, roll_func):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+ result = getattr(series.rolling(50), roll_func)()
+@@ -27,7 +28,7 @@ def test_series(series, sp_func, roll_fu
+
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_frame(raw, frame, sp_func, roll_func):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+ result = getattr(frame.rolling(50), roll_func)()
+@@ -41,7 +42,7 @@ def test_frame(raw, frame, sp_func, roll
+
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_time_rule_series(series, sp_func, roll_func):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+ win = 25
+@@ -56,7 +57,7 @@ def test_time_rule_series(series, sp_fun
+
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_time_rule_frame(raw, frame, sp_func, roll_func):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+ win = 25
+@@ -75,7 +76,7 @@ def test_time_rule_frame(raw, frame, sp_
+
+ @pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+ def test_nans(sp_func, roll_func):
+- sp_stats = pytest.importorskip("scipy.stats")
++ sp_stats = td.versioned_importorskip("scipy.stats")
+
+ compare_func = partial(getattr(sp_stats, sp_func), bias=False)
+ obj = Series(np.random.default_rng(2).standard_normal(50))
+--- a/pandas/tests/window/test_win_type.py
++++ b/pandas/tests/window/test_win_type.py
+@@ -1,6 +1,7 @@
+ import numpy as np
+ import pytest
+
++import pandas.util._test_decorators as td
+ from pandas import (
+ DataFrame,
+ Series,
+@@ -35,7 +36,7 @@ def win_types_special(request):
+
+ def test_constructor(frame_or_series):
+ # GH 12669
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ c = frame_or_series(range(5)).rolling
+
+ # valid
+@@ -47,7 +48,7 @@ def test_constructor(frame_or_series):
+ @pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
+ def test_invalid_constructor(frame_or_series, w):
+ # not valid
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ c = frame_or_series(range(5)).rolling
+ with pytest.raises(ValueError, match="min_periods must be an integer"):
+ c(win_type="boxcar", window=2, min_periods=w)
+@@ -57,7 +58,7 @@ def test_invalid_constructor(frame_or_se
+
+ @pytest.mark.parametrize("wt", ["foobar", 1])
+ def test_invalid_constructor_wintype(frame_or_series, wt):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ c = frame_or_series(range(5)).rolling
+ with pytest.raises(ValueError, match="Invalid win_type"):
+ c(win_type=wt, window=2)
+@@ -65,14 +66,14 @@ def test_invalid_constructor_wintype(fra
+
+ def test_constructor_with_win_type(frame_or_series, win_types):
+ # GH 12669
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ c = frame_or_series(range(5)).rolling
+ c(win_type=win_types, window=2)
+
+
+ @pytest.mark.parametrize("arg", ["median", "kurt", "skew"])
+ def test_agg_function_support(arg):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame({"A": np.arange(5)})
+ roll = df.rolling(2, win_type="triang")
+
+@@ -89,7 +90,7 @@ def test_agg_function_support(arg):
+
+ def test_invalid_scipy_arg():
+ # This error is raised by scipy
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ msg = r"boxcar\(\) got an unexpected"
+ with pytest.raises(TypeError, match=msg):
+ Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
+@@ -97,7 +98,7 @@ def test_invalid_scipy_arg():
+
+ def test_constructor_with_win_type_invalid(frame_or_series):
+ # GH 13383
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ c = frame_or_series(range(5)).rolling
+
+ msg = "window must be an integer 0 or greater"
+@@ -108,7 +109,7 @@ def test_constructor_with_win_type_inval
+
+ def test_window_with_args(step):
+ # make sure that we are aggregating window functions correctly with arg
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ r = Series(np.random.default_rng(2).standard_normal(100)).rolling(
+ window=10, min_periods=1, win_type="gaussian", step=step
+ )
+@@ -130,7 +131,7 @@ def test_window_with_args(step):
+
+
+ def test_win_type_with_method_invalid():
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ with pytest.raises(
+ NotImplementedError, match="'single' is the only supported method type."
+ ):
+@@ -140,7 +141,7 @@ def test_win_type_with_method_invalid():
+ @pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")])
+ def test_consistent_win_type_freq(arg):
+ # GH 15969
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ s = Series(range(1))
+ with pytest.raises(ValueError, match="Invalid win_type freq"):
+ s.rolling(arg, win_type="freq")
+@@ -153,7 +154,7 @@ def test_win_type_freq_return_none():
+
+
+ def test_win_type_not_implemented():
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed, step):
+@@ -167,7 +168,7 @@ def test_win_type_not_implemented():
+
+ def test_cmov_mean(step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ result = Series(vals).rolling(5, center=True, step=step).mean()
+ expected_values = [
+@@ -188,7 +189,7 @@ def test_cmov_mean(step):
+
+ def test_cmov_window(step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean()
+ expected_values = [
+@@ -210,7 +211,7 @@ def test_cmov_window(step):
+ def test_cmov_window_corner(step):
+ # GH 8238
+ # all nan
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = Series([np.nan] * 10)
+ result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()
+ assert np.isnan(result).all()
+@@ -294,7 +295,7 @@ def test_cmov_window_corner(step):
+ )
+ def test_cmov_window_frame(f, xp, step):
+ # Gh 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(
+ np.array(
+ [
+@@ -321,7 +322,7 @@ def test_cmov_window_frame(f, xp, step):
+
+ @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5])
+ def test_cmov_window_na_min_periods(step, min_periods):
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = Series(np.random.default_rng(2).standard_normal(10))
+ vals[4] = np.nan
+ vals[8] = np.nan
+@@ -335,7 +336,7 @@ def test_cmov_window_na_min_periods(step
+
+ def test_cmov_window_regular(win_types, step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ xps = {
+ "hamming": [
+@@ -443,7 +444,7 @@ def test_cmov_window_regular(win_types,
+
+ def test_cmov_window_regular_linear_range(win_types, step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = np.array(range(10), dtype=float)
+ xp = vals.copy()
+ xp[:2] = np.nan
+@@ -456,7 +457,7 @@ def test_cmov_window_regular_linear_rang
+
+ def test_cmov_window_regular_missing_data(win_types, step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ vals = np.array(
+ [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
+ )
+@@ -566,7 +567,7 @@ def test_cmov_window_regular_missing_dat
+
+ def test_cmov_window_special(win_types_special, step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ kwds = {
+ "kaiser": {"beta": 1.0},
+ "gaussian": {"std": 1.0},
+@@ -638,7 +639,7 @@ def test_cmov_window_special(win_types_s
+
+ def test_cmov_window_special_linear_range(win_types_special, step):
+ # GH 8238
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ kwds = {
+ "kaiser": {"beta": 1.0},
+ "gaussian": {"std": 1.0},
+@@ -663,7 +664,7 @@ def test_cmov_window_special_linear_rang
+
+ def test_weighted_var_big_window_no_segfault(win_types, center):
+ # GitHub Issue #46772
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ x = Series(0)
+ result = x.rolling(window=16, center=center, win_type=win_types).var()
+ expected = Series(np.nan)
+@@ -672,7 +673,7 @@ def test_weighted_var_big_window_no_segf
+
+
+ def test_rolling_center_axis_1():
+- pytest.importorskip("scipy")
++ td.versioned_importorskip("scipy")
+ df = DataFrame(
+ {"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]}
+ )
+--- a/pandas/util/_test_decorators.py
++++ b/pandas/util/_test_decorators.py
+@@ -79,8 +79,8 @@ def skip_if_no(package: str, min_version
+
+ The mark can be used as either a decorator for a test class or to be
+ applied to parameters in pytest.mark.parametrize calls or parametrized
+- fixtures. Use pytest.importorskip if an imported moduled is later needed
+- or for test functions.
++ fixtures. Use td.versioned_importorskip if an imported module is later
++ needed or for test functions.
+
+ If the import and version check are unsuccessful, then the test function
+ (or test case when used in conjunction with parametrization) will be
+@@ -171,3 +171,22 @@ skip_copy_on_write_invalid_test = pytest
+ get_option("mode.copy_on_write") is True,
+ reason="Test not valid for Copy-on-Write mode",
+ )
++
++def versioned_importorskip(*args, **kwargs):
++ """
++ (warning - this is currently Debian-specific, the name may change if upstream requests this)
++
++ Return the requested module, or skip the test if it is
++ not available in a new enough version.
++
++ Intended as a replacement for pytest.importorskip that
++ defaults to requiring at least pandas' minimum version for that
++ optional dependency, rather than any version.
++
++ See import_optional_dependency for full parameter documentation.
++ """
++ try:
++ module = import_optional_dependency(*args, **kwargs)
++ except ImportError as exc:
++ pytest.skip(str(exc), allow_module_level=True)
++ return module