pandas object
The original object that was serialized and then re-read.
"""
- import pytest
+ import pandas.util._test_decorators as td
- LocalPath = pytest.importorskip("py.path").local
+ LocalPath = td.versioned_importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
Will raise a skip if IPython is not installed.
"""
- pytest.importorskip("IPython", minversion="6.0.0")
+ td.versioned_importorskip("IPython", min_version="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
# GH#35711 make sure sqlite history file handle is not leaked
"""
Yields scipy sparse matrix classes.
"""
- sparse = pytest.importorskip("scipy.sparse")
+ sparse = td.versioned_importorskip("scipy.sparse")
return getattr(sparse, request.param + "_matrix")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
@pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
def engine(request):
if request.param == "numba":
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
return request.param
def test_numba_vs_python_string_index():
# GH#56189
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
1,
index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
import pytest
+import pandas.util._test_decorators as td
import pandas._testing as tm
class TestCategoricalWarnings:
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
- pytest.importorskip("IPython", minversion="6.0.0")
+ td.versioned_importorskip("IPython", min_version="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; c = pd.Categorical([])"
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs import iNaT
from pandas.core.dtypes.dtypes import DatetimeTZDtype
def test_from_arrow_with_different_units_and_timezones_with(
pa_unit, pd_unit, pa_tz, pd_tz, data
):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
pa_type = pa.timestamp(pa_unit, tz=pa_tz)
arr = pa.array(data, type=pa_type)
],
)
def test_from_arrow_from_empty(unit, tz):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
data = []
arr = pa.array(data)
def test_from_arrow_from_integers():
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789]
arr = pa.array(data)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
def test_arrow_extension_type():
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
def test_arrow_array():
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
def test_arrow_array_missing():
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
ids=["float", "datetime64[ns]"],
)
def test_arrow_table_roundtrip(breaks):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
ids=["float", "datetime64[ns]"],
)
def test_arrow_table_roundtrip_without_metadata(breaks):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = IntervalArray.from_breaks(breaks)
arr[1] = None
# in case pyarrow lost the Interval extension type (eg on parquet roundtrip
# with datetime64[ns] subtype, see GH-45881), still allow conversion
# from arrow to IntervalArray
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}])
dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
"ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask
import pytest
+import pandas.util._test_decorators as td
from pandas.compat.pyarrow import pa_version_under10p1
from pandas.core.dtypes.dtypes import PeriodDtype
)
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
def test_arrow_extension_type():
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import SparseDtype
import pandas._testing as tm
assert result == expected
def test_from_coo(self):
- scipy_sparse = pytest.importorskip("scipy.sparse")
+ scipy_sparse = td.versioned_importorskip("scipy.sparse")
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
def test_to_coo(
self, sort_labels, expected_rows, expected_cols, expected_values_pos
):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
index = pd.MultiIndex.from_tuples(
@pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
@pytest.mark.parametrize("dtype", ["float64", "int64"])
def test_from_spmatrix(self, format, labels, dtype):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item())
@pytest.mark.parametrize("format", ["csc", "csr", "coo"])
def test_from_spmatrix_including_explicit_zero(self, format):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
mat = sp_sparse.random(10, 2, density=0.5, format=format)
mat.data[0] = 0
[["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
)
def test_from_spmatrix_columns(self, columns):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
dtype = SparseDtype("float64", 0.0)
"colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)]
)
def test_to_coo(self, colnames):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
df = pd.DataFrame(
{colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]"
@pytest.mark.parametrize("fill_value", [1, np.nan])
def test_to_coo_nonzero_fill_val_raises(self, fill_value):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = pd.DataFrame(
{
"A": SparseArray(
def test_to_coo_midx_categorical(self):
# GH#50996
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
midx = pd.MultiIndex.from_arrays(
[
@pytest.mark.parametrize("dtype", ["int64", "float64"])
@pytest.mark.parametrize("dense_index", [True, False])
def test_series_from_coo(self, dtype, dense_index):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
A = sp_sparse.eye(3, format="coo", dtype=dtype)
result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
def test_series_from_coo_incorrect_format_raises(self):
# gh-26554
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
with pytest.raises(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs.sparse import IntIndex
import pandas as pd
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
@pytest.mark.parametrize("size", [0, 10])
def test_from_spmatrix(self, size, format):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
mat = sp_sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
def test_from_spmatrix_including_explicit_zero(self, format):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
mat = sp_sparse.random(10, 1, density=0.5, format=format)
mat.data[0] = 0
tm.assert_numpy_array_equal(result, expected)
def test_from_spmatrix_raises(self):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
mat = sp_sparse.eye(5, 4, format="csc")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat.pyarrow import pa_version_under12p0
from pandas.core.dtypes.common import is_dtype_equal
def test_arrow_array(dtype):
# protocol added in 0.15.0
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
import pyarrow.compute as pc
data = pd.array(["a", "b", "c"], dtype=dtype)
@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string):
# roundtrip possible from arrow 1.0.0
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
if using_infer_string and string_storage2 != "pyarrow_numpy":
request.applymarker(
dtype, string_storage2, request, using_infer_string
):
# GH-41040
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
if using_infer_string and string_storage2 != "pyarrow_numpy":
request.applymarker(
def test_eq_all_na():
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
a = pd.array([pd.NA, pd.NA], dtype=StringDtype("pyarrow"))
result = a == a
expected = pd.array([pd.NA, pd.NA], dtype="boolean[pyarrow]")
@pytest.mark.parametrize("chunked", [True, False])
@pytest.mark.parametrize("array", ["numpy", "pyarrow"])
def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
array = pa if array in arrow_string_storage else np
@pytest.mark.parametrize("chunked", [True, False])
def test_constructor_not_string_type_value_dictionary_raises(chunked):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = pa.array([1, 2, 3], pa.dictionary(pa.int32(), pa.int32()))
if chunked:
)
@pytest.mark.parametrize("chunked", [True, False])
def test_constructor_valid_string_type_value_dictionary(chunked):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = pa.array(["1", "2", "3"], pa.large_string()).dictionary_encode()
if chunked:
def test_constructor_from_list():
# GH#27673
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
result = pd.Series(["E"], dtype=StringDtype(storage="pyarrow"))
assert isinstance(result.dtype, StringDtype)
assert result.dtype.storage == "pyarrow"
def test_from_sequence_wrong_dtype_raises(using_infer_string):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
with pd.option_context("string_storage", "python"):
ArrowStringArray._from_sequence(["a", None, "c"], dtype="string")
],
)
def test_setitem(multiple_chunks, key, value, expected):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
result = pa.array(list("abcde"))
expected = pa.array(expected)
def test_setitem_invalid_indexer_raises():
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = ArrowStringArray(pa.array(list("abcde")))
@pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"])
def test_pickle_roundtrip(dtype):
# GH 42600
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = pd.Series(range(10), dtype=dtype)
expected_sliced = expected.head(2)
full_pickled = pickle.dumps(expected)
def test_string_dtype_error_message():
# GH#55051
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'."
with pytest.raises(ValueError, match=msg):
StringDtype("bla")
import pytest
+import pandas.util._test_decorators as td
from pandas.compat._optional import VERSIONS
import pandas as pd
from pandas.core.computation.check import NUMEXPR_INSTALLED
- ne = pytest.importorskip("numexpr")
+ ne = td.versioned_importorskip("numexpr")
ver = ne.__version__
if Version(ver) < Version(VERSIONS["numexpr"]):
@pytest.mark.parametrize("parser", expr.PARSERS)
def test_invalid_numexpr_version(engine, parser):
if engine == "numexpr":
- pytest.importorskip("numexpr")
+ td.versioned_importorskip("numexpr")
a, b = 1, 2 # noqa: F841
res = pd.eval("a + b", engine=engine, parser=parser)
assert res == 3
@pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])
def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):
if new_dtype == "int64[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
df_orig = df.copy()
df2 = df.astype(new_dtype)
@pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])
def test_astype_different_target_dtype(using_copy_on_write, dtype):
if dtype == "int32[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2, 3]})
df_orig = df.copy()
df2 = df.astype(dtype)
def test_astype_arrow_timestamp(using_copy_on_write):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"a": [
def test_is_scipy_sparse():
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3]))
import pytest
import pytz
+import pandas.util._test_decorators as td
from pandas._libs import (
lib,
missing as libmissing,
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
)
from pandas.tests.extension import base
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
from pandas.core.arrays.arrow.array import ArrowExtensionArray
from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_string_dtype
elif arr.dtype.storage != "pyarrow":
return arr
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arrow_array = arr._pa_array
split = len(arrow_array) // 2
)
def test_adding_new_conditional_column_with_string(dtype, infer_string) -> None:
# https://github.com/pandas-dev/pandas/issues/56204
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2], "b": [3, 4]})
with pd.option_context("future.infer_string", infer_string):
def test_add_new_column_infer_string():
# GH#55366
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"x": [1]})
with pd.option_context("future.infer_string", True):
df.loc[df["x"] == 1, "y"] = "1"
def test_setitem_string_option_object_index(self):
# GH#55638
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2]})
with pd.option_context("future.infer_string", True):
df["b"] = Index(["a", "b"], dtype=object)
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_astype_copies(dtype):
# GH#50984
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
result = df.astype("int64[pyarrow]", copy=True)
df.iloc[0, 0] = 100
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
assert result.columns.name == "cols"
def test_pyarrow_dtype_backend(self):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
tm.assert_frame_equal(result, expected)
def test_pyarrow_dtype_backend_already_pyarrow(self):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]")
result = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_frame_equal(result, expected)
def test_pyarrow_dtype_backend_from_pandas_nullable(self):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"a": pd.Series([1, 2, None], dtype="Int32"),
def test_pyarrow_dtype_empty_object(self):
# GH 50970
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = pd.DataFrame(columns=[0])
result = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_frame_equal(result, expected)
def test_pyarrow_backend_no_conversion(self):
# GH#52872
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
expected = df.copy()
result = df.convert_dtypes(
def test_convert_dtypes_pyarrow_to_np_nullable(self):
# GH 53648
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
expected = pd.DataFrame(range(2), dtype="Int32")
def test_convert_dtypes_pyarrow_timestamp(self):
# GH 54191
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min"))
expected = ser.astype("timestamp[ms][pyarrow]")
result = expected.convert_dtypes(dtype_backend="pyarrow")
@pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
def test_corr_scipy_method(self, float_frame, method):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
float_frame.loc[float_frame.index[:5], "A"] = np.nan
float_frame.loc[float_frame.index[5:10], "B"] = np.nan
float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20].copy()
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_nooverlap(self, meth):
# nothing in common
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(
{
"A": [1, 1.5, 1, np.nan, np.nan, np.nan],
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_nullable_integer(self, nullable_column, other_column, method):
# https://github.com/pandas-dev/pandas/issues/33803
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
data = DataFrame({"a": nullable_column, "b": other_column})
result = data.corr(method=method)
expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_min_periods_greater_than_length(self, method):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"A": [1, 2], "B": [1, 2]})
result = df.corr(method=method, min_periods=3)
expected = DataFrame(
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
if numeric_only:
def test_corrwith_spearman(self):
# GH#21925
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
def test_corrwith_kendall(self):
# GH#21925
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
def test_corrwith_spearman_with_tied_data(self):
# GH#48826
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df1 = DataFrame(
{
"A": [1, np.nan, 7, 8],
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
def test_describe_exclude_pa_dtype(self):
# GH#52570
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"a": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int8())),
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
[("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")],
)
def test_arrow_dtype(dtype, exp_dtype):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
cols = ["a", "b"]
df_a = DataFrame([[1, 2], [3, 4], [5, 6]], columns=cols, dtype="int32")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import (
IS64,
PYPY,
@pytest.mark.single_cpu
def test_info_compute_numba():
# GH#51922
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
df = DataFrame([[1, 2], [3, 4]])
with option_context("compute.use_numba", True):
df.interpolate(method="values")
def test_interp_various(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_interp_alt_scipy(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
)
def test_interpolate_arrow(self, dtype):
# GH#55347
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]")
result = df.interpolate(limit=2)
expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import MergeError
import pandas as pd
# GH 46622
# Dups on right allowed by one_to_many constraint
if dtype == "string[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
left_no_dup = left_no_dup.astype(dtype)
right_w_dups.index = right_w_dups.index.astype(dtype)
left_no_dup.join(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs.algos import (
Infinity,
NegInfinity,
return request.param
def test_rank(self, float_frame):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
float_frame.loc[::2, "A"] = np.nan
float_frame.loc[::3, "B"] = np.nan
float_string_frame.rank(axis=1)
def test_rank_na_option(self, float_frame):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
float_frame.loc[::2, "A"] = np.nan
float_frame.loc[::3, "B"] = np.nan
@pytest.mark.parametrize("ax", [0, 1])
@pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"])
def test_rank_methods_frame(self, ax, m):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
xs = np.random.default_rng(2).integers(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
)
def test_rank_string_dtype(self, dtype, exp_dtype):
# GH#55362
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
obj = Series(["foo", "foo", None, "foo"], dtype=dtype)
result = obj.rank(method="first")
expected = Series([1, 2, None, 3], dtype=exp_dtype)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_pyarrow_string_dtype
from pandas._config.config import option_context
def test_tab_complete_warning(self, ip, frame_or_series):
# GH 16409
- pytest.importorskip("IPython", minversion="6.0.0")
+ td.versioned_importorskip("IPython", min_version="6.0.0")
from IPython.core.completer import provisionalcompleter
if frame_or_series is DataFrame:
def test_inspect_getmembers(self):
# GH38740
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
df = DataFrame()
msg = "DataFrame._data is deprecated"
with tm.assert_produces_warning(
import pandas as pd
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
@td.skip_if_no("pyarrow", min_version="14.0")
def test_frame_string_inference(self):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
expected = DataFrame(
{"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
def test_frame_string_inference_array_string_dtype(self):
# GH#54496
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
expected = DataFrame(
{"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype)
def test_frame_string_inference_block_dim(self):
# GH#55363
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
with pd.option_context("future.infer_string", True):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
assert df._mgr.blocks[0].ndim == 2
)
def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
# GH 53617
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
cols = pd.arrays.ArrowExtensionArray(
pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
)
@pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"])
def test_query_ea_dtypes(self, dtype):
if dtype == "int64[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
# GH#50261
df = DataFrame({"a": Series([1, 2], dtype=dtype)})
ref = {2} # noqa: F841
if engine == "numexpr" and not NUMEXPR_INSTALLED:
pytest.skip("numexpr not installed")
if dtype == "int64[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)}
)
)
def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
def skewness(x):
if len(x) < 3:
def test_idxmax_arrow_types(self):
# GH#55368
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")
result = df.idxmax()
result = getattr(df, method)(axis=None, numeric_only=numeric_only)
np_arr = df.to_numpy(dtype=np.float64)
if method in {"skew", "kurt"}:
- comp_mod = pytest.importorskip("scipy.stats")
+ comp_mod = td.versioned_importorskip("scipy.stats")
if method == "kurt":
method = "kurtosis"
expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_pyarrow_string_dtype
from pandas import (
assert "StringCol" in repr(df)
def test_latex_repr(self):
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
expected = r"""\begin{tabular}{llll}
\toprule
& 0 & 1 & 2 \\
def test_repr_ea_columns(self, any_string_dtype):
# GH#54797
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]})
df.columns = df.columns.astype(any_string_dtype)
expected = """ long_column_name col2
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
assert isinstance(result, tm.SubclassedSeries)
def test_corrwith(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
index = ["a", "b", "c", "d", "e"]
columns = ["one", "two", "three", "four"]
df1 = tm.SubclassedDataFrame(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_extension_array_dtype
# https://github.com/pandas-dev/pandas/issues/39184
# test that the deprecation also works with > 2 inputs -> using a numba
# written ufunc for this because numpy itself doesn't have such ufuncs
- numba = pytest.importorskip("numba")
+ numba = td.versioned_importorskip("numba")
@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def my_ufunc(x, y, z):
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
@not_implemented_mark
def test_finalize_called_eval_numexpr():
- pytest.importorskip("numexpr")
+ td.versioned_importorskip("numexpr")
df = pd.DataFrame({"A": [1, 2]})
df.attrs["A"] = 1
result = df.eval("A + 1", engine="numexpr")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
)
import pandas._testing as tm
-pytest.importorskip("xarray")
+td.versioned_importorskip("xarray")
class TestDataFrameToXArray:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import NumbaUtilError
from pandas import (
def test_correct_function_signature():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def incorrect_function(x):
return sum(x) * 2.7
def test_check_nopython_kwargs():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def incorrect_function(values, index):
return sum(values) * 2.7
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
@pytest.mark.parametrize("as_index", [True, False])
def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func_numba(values, index):
return np.mean(values) * 2.7
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
def test_cache(jit, pandas_obj, nogil, parallel, nopython):
# Test that the functions are cached correctly if we switch functions
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func_1(values, index):
return np.mean(values) - 3.4
def test_use_global_config():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func_1(values, index):
return np.mean(values) - 3.4
],
)
def test_multifunc_numba_vs_cython_frame(agg_kwargs):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
data = DataFrame(
{
0: ["a", "a", "b", "b", "a"],
],
)
def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
data = DataFrame(
{
0: ["a", "a", "b", "b", "a"],
[{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
)
def test_multifunc_numba_vs_cython_series(agg_kwargs):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
labels = ["a", "a", "b", "b", "a"]
data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
grouped = data.groupby(labels)
strict=False,
)
def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
labels = ["a", "a", "b", "b", "a"]
grouped = data.groupby(labels)
result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
def test_args_not_cached():
# GH 41647
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def sum_last(values, index, n):
return values[-n:].sum()
def test_index_data_correctly_passed():
# GH 43133
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def f(values, index):
return np.mean(index)
def test_engine_kwargs_not_cached():
# If the user passes a different set of engine_kwargs don't return the same
# jitted function
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
nogil = True
parallel = False
nopython = True
@pytest.mark.filterwarnings("ignore")
def test_multiindex_one_key(nogil, parallel, nopython):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def numba_func(values, index):
return 1
def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def numba_func(values, index):
return 1
def test_multilabel_numba_vs_cython(numba_supported_reductions):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
reduction, kwargs = numba_supported_reductions
df = DataFrame(
{
def test_multilabel_udf_numba_vs_cython():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
def test_count_arrow_string_array(any_string_dtype):
# GH#54751
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)}
)
def test_groupby_duplicate_columns(infer_string):
# GH: 31735
if infer_string:
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
).astype(object)
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
pytestmark = pytest.mark.single_cpu
-pytest.importorskip("numba")
+td.versioned_importorskip("numba")
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("func", ["min", "max"])
def test_min_empty_string_dtype(func):
# GH#55619
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
result = getattr(df.groupby("a"), func)()
import pytest
import pytz
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
def test_groupby_agg_numba_timegrouper_with_nat(
self, groupby_with_truncated_bingrouper
):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
# See discussion in GH#43487
gb = groupby_with_truncated_bingrouper
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import NumbaUtilError
from pandas import (
def test_correct_function_signature():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def incorrect_function(x):
return x + 1
def test_check_nopython_kwargs():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def incorrect_function(values, index):
return values + 1
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
@pytest.mark.parametrize("as_index", [True, False])
def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func(values, index):
return values + 1
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
def test_cache(jit, pandas_obj, nogil, parallel, nopython):
# Test that the functions are cached correctly if we switch functions
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func_1(values, index):
return values + 1
def test_use_global_config():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def func_1(values, index):
return values + 1
"agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}]
)
def test_string_cython_vs_numba(agg_func, numba_supported_reductions):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
agg_func, kwargs = numba_supported_reductions
data = DataFrame(
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
def test_args_not_cached():
# GH 41647
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def sum_last(values, index, n):
return values[-n:].sum()
def test_index_data_correctly_passed():
# GH 43133
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def f(values, index):
return index - 1
def test_engine_kwargs_not_cached():
# If the user passes a different set of engine_kwargs don't return the same
# jitted function
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
nogil = True
parallel = False
nopython = True
@pytest.mark.filterwarnings("ignore")
def test_multiindex_one_key(nogil, parallel, nopython):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def numba_func(values, index):
return 1
def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
def numba_func(values, index):
return 1
def test_multilabel_numba_vs_cython(numba_supported_reductions):
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
reduction, kwargs = numba_supported_reductions
df = DataFrame(
{
def test_multilabel_udf_numba_vs_cython():
- pytest.importorskip("numba")
+ td.versioned_importorskip("numba")
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
def test_index_string_inference(self):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
expected = Index(["a", "b"], dtype=dtype)
with pd.option_context("future.infer_string", True):
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import Index
import pandas._testing as tm
def test_insert_none_into_string_numpy(self):
# GH#55365
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]")
result = index.insert(-1, None)
expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
def test_from_frame_missing_values_multiIndex():
# GH 39984
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import InvalidIndexError
from pandas import (
def test_get_indexer_masked_na_boolean(self, dtype):
# GH#39133
if dtype == "bool[pyarrow]":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
idx = Index([True, False, NA], dtype=dtype)
result = idx.get_loc(False)
assert result == 1
assert result == 2
def test_get_indexer_arrow_dictionary_target(self):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
target = Index(
ArrowExtensionArray(
pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8()))
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
- pytest.importorskip("IPython", minversion="6.0.0")
+ td.versioned_importorskip("IPython", min_version="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
def test_getitem_pyarrow_index(self, frame_or_series):
# GH 53644
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
obj = frame_or_series(
range(5),
index=date_range("2020", freq="D", periods=5).astype(
@pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
spmatrix_t = getattr(sp_sparse, spmatrix_t)
def test_loc_getitem_sparse_frame(self):
# GH34687
- sp_sparse = pytest.importorskip("scipy.sparse")
+ sp_sparse = td.versioned_importorskip("scipy.sparse")
df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5))
result = df.loc[range(2)]
def test_loc_setitem_pyarrow_strings():
# GH#52319
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"strings": Series(["A", "B", "C"], dtype="string[pyarrow]"),
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs.tslibs import iNaT
from pandas.compat import (
is_ci_environment,
def test_categorical_pyarrow():
# GH 49889
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]
table = pa.table({"weekday": pa.array(arr).dictionary_encode()})
def test_empty_categorical_pyarrow():
# https://github.com/pandas-dev/pandas/issues/53077
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
arr = [None]
table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()})
def test_large_string_pyarrow():
# GH 52795
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
arr = ["Mon", "Tue"]
table = pa.table({"weekday": pa.array(arr, "large_string")})
)
def test_bitmasks_pyarrow(offset, length, expected_values):
# GH 52795
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
arr = [3.3, None, 2.1]
table = pa.table({"arr": arr}).slice(offset, length)
@pytest.mark.parametrize("data", [{}, {"a": []}])
def test_empty_pyarrow(data):
# GH 53155
- pytest.importorskip("pyarrow", "11.0.0")
+ td.versioned_importorskip("pyarrow", "11.0.0")
from pyarrow.interchange import from_dataframe as pa_from_dataframe
expected = pd.DataFrame(data)
def test_multi_chunk_pyarrow() -> None:
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]])
names = ["n_legs"]
table = pa.table([n_legs], names=names)
def test_multi_chunk_column() -> None:
- pytest.importorskip("pyarrow", "11.0.0")
+ td.versioned_importorskip("pyarrow", "11.0.0")
ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]")
df = pd.concat([ser, ser], ignore_index=True).to_frame("a")
df_orig = df.copy()
def test_timestamp_ns_pyarrow():
# GH 56712
- pytest.importorskip("pyarrow", "11.0.0")
+ td.versioned_importorskip("pyarrow", "11.0.0")
timestamp_args = {
"year": 2000,
"month": 1,
def test_interchange_from_non_pandas_tz_aware(request):
# GH 54239, 54287
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
import pyarrow.compute as pc
if is_platform_windows() and is_ci_environment():
def test_large_string():
# GH#56702
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
result = pd.api.interchange.from_dataframe(df.__dataframe__())
expected = pd.DataFrame({"a": ["x"]}, dtype="object")
) -> None:
# https://github.com/pandas-dev/pandas/issues/57643
# https://github.com/pandas-dev/pandas/issues/57664
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
import pyarrow.interchange as pai
if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
data: list, dtype: str, expected_dtype: str
) -> None:
# https://github.com/pandas-dev/pandas/issues/57643
- pa = pytest.importorskip("pyarrow", "11.0.0")
+ pa = td.versioned_importorskip("pyarrow", "11.0.0")
import pyarrow.interchange as pai
if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
def test_string_validity_buffer() -> None:
# https://github.com/pandas-dev/pandas/issues/57761
- pytest.importorskip("pyarrow", "11.0.0")
+ td.versioned_importorskip("pyarrow", "11.0.0")
df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
assert result is None
def test_string_validity_buffer_no_missing() -> None:
# https://github.com/pandas-dev/pandas/issues/57762
- pytest.importorskip("pyarrow", "11.0.0")
+ td.versioned_importorskip("pyarrow", "11.0.0")
df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]")
validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
assert validity is not None
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
)
def test_dtype_to_arrow_c_fmt_arrowdtype(pa_dtype, args_kwargs, c_string):
# GH 52323
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
if not args_kwargs:
pa_type = getattr(pa, pa_dtype)()
elif isinstance(args_kwargs, tuple):
Sets up moto server in separate process locally
Return url for motoserver/moto CI service
"""
- pytest.importorskip("s3fs")
- pytest.importorskip("boto3")
+ td.versioned_importorskip("s3fs")
+ td.versioned_importorskip("boto3")
# temporary workaround as moto fails for botocore >= 1.11 otherwise,
# see https://github.com/spulec/moto/issues/1924 & 1952
# set in .github/workflows/unit-tests.yml
yield "http://localhost:5000"
else:
- requests = pytest.importorskip("requests")
- pytest.importorskip("moto")
- pytest.importorskip("flask") # server mode needs flask too
+ requests = td.versioned_importorskip("requests")
+ td.versioned_importorskip("moto")
+ td.versioned_importorskip("flask") # server mode needs flask too
# Launching moto in server mode, i.e., as a separate process
# with an S3 endpoint on localhost
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_windows
import pandas as pd
import pandas._testing as tm
-pytest.importorskip("odf")
+td.versioned_importorskip("odf")
if is_platform_windows():
pytestmark = pytest.mark.single_cpu
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_windows
import pandas as pd
from pandas.io.excel import ExcelWriter
-odf = pytest.importorskip("odf")
+odf = td.versioned_importorskip("odf")
if is_platform_windows():
pytestmark = pytest.mark.single_cpu
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_windows
import pandas as pd
)
from pandas.io.excel._openpyxl import OpenpyxlReader
-openpyxl = pytest.importorskip("openpyxl")
+openpyxl = td.versioned_importorskip("openpyxl")
if is_platform_windows():
pytestmark = pytest.mark.single_cpu
if read_ext in (".xlsb", ".xls"):
pytest.skip(f"No engine for filetype: '{read_ext}'")
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
with pd.option_context("mode.string_storage", string_storage):
df = DataFrame(
from pandas.io.excel import ExcelWriter
from pandas.io.formats.excel import ExcelFormatter
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
# could compute styles and render to excel without jinja2, since there is no
# 'template' file, but this needs the import error to delayed until render time.
)
def test_styler_to_excel_unstyled(engine):
# compare DataFrame.to_excel and Styler.to_excel when no styles applied
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
with tm.ensure_clean(".xlsx") as path:
with ExcelWriter(path, engine=engine) as writer:
df.to_excel(writer, sheet_name="dataframe")
df.style.to_excel(writer, sheet_name="unstyled")
- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
assert len(col1) == len(col2)
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic(engine, css, attrs, expected):
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
styler = df.style.map(lambda x: css)
df.to_excel(writer, sheet_name="dataframe")
styler.to_excel(writer, sheet_name="styled")
- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
# test unstyled data cell does not have expected styles
# test styled cell has expected styles
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
styler = df.style
null_styler.to_excel(writer, sheet_name="null_styled")
styler.to_excel(writer, sheet_name="styled")
- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
# test null styled index cells does not have expected styles
# test styled cell has expected styles
attrs = ["border", "left", "style"]
expected = border_style
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
styler = df.style.map(lambda x: css)
df.to_excel(writer, sheet_name="dataframe")
styler.to_excel(writer, sheet_name="styled")
- openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ openpyxl = td.versioned_importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
# test unstyled data cell does not have expected styles
# test styled cell has expected styles
def test_styler_custom_converter():
- openpyxl = pytest.importorskip("openpyxl")
+ openpyxl = td.versioned_importorskip("openpyxl")
def custom_converter(css):
return {"font": {"color": {"rgb": "111222"}}}
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_windows
import pandas as pd
from pandas.io.excel import ExcelFile
from pandas.io.excel._base import inspect_excel_format
-xlrd = pytest.importorskip("xlrd")
+xlrd = td.versioned_importorskip("xlrd")
if is_platform_windows():
pytestmark = pytest.mark.single_cpu
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_windows
from pandas import DataFrame
from pandas.io.excel import ExcelWriter
-xlsxwriter = pytest.importorskip("xlsxwriter")
+xlsxwriter = td.versioned_importorskip("xlsxwriter")
if is_platform_windows():
pytestmark = pytest.mark.single_cpu
def test_column_format(ext):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
- openpyxl = pytest.importorskip("openpyxl")
+ openpyxl = td.versioned_importorskip("openpyxl")
with tm.ensure_clean(ext) as path:
frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
read_csv,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
def bar_grad(a=None, b=None, c=None, d=None):
import pytest
-jinja2 = pytest.importorskip("jinja2")
+import pandas.util._test_decorators as td
+jinja2 = td.versioned_importorskip("jinja2")
from pandas import (
DataFrame,
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
option_context,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import _str_escape
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
IndexSlice,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
-jinja2 = pytest.importorskip("jinja2")
+jinja2 = td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
IndexSlice,
Series,
)
-pytest.importorskip("matplotlib")
-pytest.importorskip("jinja2")
+td.versioned_importorskip("matplotlib")
+td.versioned_importorskip("jinja2")
import matplotlib as mpl
# 1) Resets units registry
# 2) Resets rc_context
# 3) Closes all figures
- mpl = pytest.importorskip("matplotlib")
- mpl_units = pytest.importorskip("matplotlib.units")
- plt = pytest.importorskip("matplotlib.pyplot")
+ mpl = td.versioned_importorskip("matplotlib")
+ mpl_units = td.versioned_importorskip("matplotlib.units")
+ plt = td.versioned_importorskip("matplotlib.pyplot")
orig_units_registry = mpl_units.registry.copy()
with mpl.rc_context():
mpl.use("template")
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
IndexSlice,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
import pandas._testing as tm
import pandas.util._test_decorators as td
-jinja2 = pytest.importorskip("jinja2")
+jinja2 = td.versioned_importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import (
_parse_latex_cell_styles,
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
)
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import DataFrame
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
from pandas.io.formats.style import Styler
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_pyarrow_string_dtype
import pandas as pd
):
df = DataFrame([data])
if method in ["to_latex"]: # uses styler implementation
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
with pytest.raises(
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
if method in ["to_latex"]: # uses styler implementation
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
msg = "buf is not a file name and it has no write method"
with pytest.raises(TypeError, match=msg):
getattr(float_frame, method)(buf=object())
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import CSSWarning
import pandas._testing as tm
def test_css_named_colors_from_mpl_present():
- mpl_colors = pytest.importorskip("matplotlib.colors")
+ mpl_colors = td.versioned_importorskip("matplotlib.colors")
pd_colors = CSSToExcelConverter.NAMED_COLORS
for name, color in mpl_colors.CSS4_COLORS.items():
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
)
import pandas._testing as tm
-pytest.importorskip("jinja2")
+td.versioned_importorskip("jinja2")
def _dedent(string):
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
-pytest.importorskip("tabulate")
+td.versioned_importorskip("tabulate")
def test_simple():
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_pyarrow_string_dtype
from pandas import (
def test_to_string_string_dtype(self):
# GH#50099
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]}
)
self, string_storage, dtype_backend, orient, using_infer_string
):
# GH#50750
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"a": Series([1, np.nan, 3], dtype="Int64"),
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
elif dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
@pytest.mark.parametrize("orient", ["split", "records", "index"])
def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
# GH#50750
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
ser = Series([1, np.nan, 3], dtype="Int64")
out = ser.to_json(orient=orient)
def test_json_roundtrip_string_inference(orient):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
[["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
@pytest.fixture(params=["ujson", "pyarrow"])
def engine(request):
if request.param == "pyarrow":
- pytest.importorskip("pyarrow.json")
+ td.versioned_importorskip("pyarrow.json")
return request.param
import pytest
+import pandas.util._test_decorators as td
from pandas.compat._optional import VERSIONS
from pandas import (
"""
parser = request.param()
if parser.engine == "pyarrow":
- pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
+ td.versioned_importorskip("pyarrow", VERSIONS["pyarrow"])
# Try finding a way to disable threads all together
# for more stable CI runs
import pyarrow
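For context on the pattern these hunks apply, below is a minimal illustrative sketch of what a versioned importorskip helper could look like. It assumes (hypothetically) that the helper wraps pytest.importorskip and falls back to the minimum versions declared in pandas.compat._optional.VERSIONS when no explicit min_version is passed; this is an assumption for illustration, not the actual pandas implementation.

    # Hypothetical sketch: wrap pytest.importorskip with a version floor taken
    # from the VERSIONS mapping when the caller does not pass min_version.
    import pytest
    from pandas.compat._optional import VERSIONS

    def versioned_importorskip(modname, min_version=None):
        # Fall back to the declared minimum for the top-level package, if any.
        if min_version is None:
            min_version = VERSIONS.get(modname.split(".")[0])
        # pytest.importorskip skips the test when the module is missing
        # or older than the requested minimum version.
        return pytest.importorskip(modname, minversion=min_version)

Under that assumption it is called the same way as in the hunks here, e.g. versioned_importorskip("pyarrow") or versioned_importorskip("botocore", min_version="1.10.47").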
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import ParserWarning
import pandas as pd
def test_dtype_backend_string(all_parsers, string_storage):
# GH#36712
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
with pd.option_context("mode.string_storage", string_storage):
parser = all_parsers
def test_dtype_backend_pyarrow(all_parsers, request):
# GH#36712
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
parser = all_parsers
data = """a,b,c,d,e,f,g,h,i,j
def test_string_inference(all_parsers):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
data = """a,b
@pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])
def test_string_inference_object_dtype(all_parsers, dtype):
# GH#56047
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
data = """a,b
x,a
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import DtypeWarning
import pandas._testing as tm
def test_concatenate_chunks_pyarrow():
# GH#51876
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
chunks = [
{0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
{0: ArrowExtensionArray(pa.array([1, 2]))},
def test_concatenate_chunks_pyarrow_strings():
# GH#51876
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
chunks = [
{0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
{0: ArrowExtensionArray(pa.array(["a", "b"]))},
def test_parse_public_s3_bucket(self, s3_public_bucket_with_data, tips_df, s3so):
# more of an integration test due to the not-public contents portion
# can probably mock this though.
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
df = read_csv(
f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,
def test_parse_private_s3_bucket(self, s3_private_bucket_with_data, tips_df, s3so):
# Read public file from bucket with not-public contents
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
df = read_csv(
f"s3://{s3_private_bucket_with_data.name}/tips.csv", storage_options=s3so
)
def test_write_s3_parquet_fails(self, tips_df, s3so):
# GH 27679
# Attempting to write to an invalid S3 path should raise
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
import botocore
# GH 34087
self, s3_public_bucket_with_data, feather_file, s3so
):
# GH 29055
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = read_feather(feather_file)
res = read_feather(
f"s3://{s3_public_bucket_with_data.name}/simple_dataset.feather",
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import (
ParserError,
ParserWarning,
data = data.replace(b",", b"::")
expected = parser.read_csv(csv1)
- module = pytest.importorskip(compression)
+ module = td.versioned_importorskip(compression)
klass = getattr(module, klass)
with tm.ensure_clean() as path:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import EmptyDataError
import pandas as pd
arr = StringArray(np.array(["a", "b"], dtype=np.object_))
arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
elif dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
arr = ArrowExtensionArray(pa.array(["a", "b"]))
arr_na = ArrowExtensionArray(pa.array([None, "a"]))
else:
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
arr = ArrowStringArray(pa.array(["a", "b"]))
arr_na = ArrowStringArray(pa.array([None, "a"]))
}
)
if dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
expected = DataFrame(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs.parsers import (
_maybe_upcast,
na_values,
@pytest.mark.parametrize("val", [na_values[np.object_], "c"])
def test_maybe_upcast_object(val, string_storage):
# GH#36712
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
with pd.option_context("mode.string_storage", string_storage):
arr = np.array(["a", "b", val], dtype=np.object_)
import pytest
+import pandas.util._test_decorators as td
from pandas.io.pytables import HDFStore
-tables = pytest.importorskip("tables")
+tables = td.versioned_importorskip("tables")
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
pytestmark = pytest.mark.single_cpu
-tables = pytest.importorskip("tables")
+tables = td.versioned_importorskip("tables")
@pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
-tables = pytest.importorskip("tables")
+tables = td.versioned_importorskip("tables")
@pytest.fixture
def test_read_infer_string(tmp_path, setup_path):
# GH#54431
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": ["a", "b", None]})
path = tmp_path / setup_path
df.to_hdf(path, key="data", format="table")
def test_infer_string_columns(tmp_path, setup_path):
# GH#
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
path = tmp_path / setup_path
with pd.option_context("future.infer_string", True):
df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
pytestmark = pytest.mark.single_cpu
-tables = pytest.importorskip("tables")
+tables = td.versioned_importorskip("tables")
def test_context(setup_path):
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
read_hdf,
)
-pytest.importorskip("tables")
+td.versioned_importorskip("tables")
class TestHDFStoreSubclass:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.errors import (
PyperclipException,
PyperclipWindowsException,
):
# GH#50502
if string_storage == "pyarrow" or dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
if string_storage == "python":
string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
elif dtype_backend == "pyarrow" and engine != "c":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
string_array = ArrowExtensionArray(pa.array(["x", "y"]))
def test_stringify_file_and_path_like(self):
# GH 38125: do not stringify file objects that are also path-like
- fsspec = pytest.importorskip("fsspec")
+ fsspec = td.versioned_importorskip("fsspec")
with tm.ensure_clean() as path:
with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
assert fsspec_obj == icom.stringify_path(fsspec_obj)
# Test that pyarrow can handle a file opened with get_handle
def test_get_handle_pyarrow_compat(self):
- pa_csv = pytest.importorskip("pyarrow.csv")
+ pa_csv = td.versioned_importorskip("pyarrow.csv")
# Test latin1, ucs-2, and ucs-4 chars
data = """a,b,c
],
)
def test_read_non_existent(self, reader, module, error_class, fn_ext):
- pytest.importorskip(module)
+ td.versioned_importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
)
# NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
- pytest.importorskip(module)
+ td.versioned_importorskip(module)
dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})
def test_read_expands_user_home_dir(
self, reader, module, error_class, fn_ext, monkeypatch
):
- pytest.importorskip(module)
+ td.versioned_importorskip(module)
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
],
)
def test_read_fspath_all(self, reader, module, path, datapath):
- pytest.importorskip(module)
+ td.versioned_importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
)
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
if writer_name in ["to_latex"]: # uses Styler implementation
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
- pytest.importorskip(module)
+ td.versioned_importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
- pytest.importorskip("tables")
+ td.versioned_importorskip("tables")
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean("string")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
"ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
@pytest.mark.single_cpu
@pytest.fixture
def fsspectest():
- pytest.importorskip("fsspec")
+ td.versioned_importorskip("fsspec")
from fsspec import register_implementation
from fsspec.implementations.memory import MemoryFileSystem
from fsspec.registry import _registry as registry
@pytest.fixture
def cleared_fs():
- fsspec = pytest.importorskip("fsspec")
+ fsspec = td.versioned_importorskip("fsspec")
memfs = fsspec.filesystem("memory")
yield memfs
def test_to_excel(cleared_fs, df1):
- pytest.importorskip("openpyxl")
+ td.versioned_importorskip("openpyxl")
ext = "xlsx"
path = f"memory://test/test.{ext}"
df1.to_excel(path, index=True)
@pytest.mark.parametrize("binary_mode", [False, True])
def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1):
- fsspec = pytest.importorskip("fsspec")
+ fsspec = td.versioned_importorskip("fsspec")
path = "memory://test/test.csv"
mode = "wb" if binary_mode else "w"
def test_excel_options(fsspectest):
- pytest.importorskip("openpyxl")
+ td.versioned_importorskip("openpyxl")
extension = "xlsx"
df = DataFrame({"a": [0]})
def test_to_parquet_new_file(cleared_fs, df1):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- pytest.importorskip("fastparquet")
+ td.versioned_importorskip("fastparquet")
df1.to_parquet(
"memory://test/test.csv", index=True, engine="fastparquet", compression=None
def test_arrowparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
def test_fastparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- pytest.importorskip("fastparquet")
+ td.versioned_importorskip("fastparquet")
df = DataFrame({"a": [0]})
df.to_parquet(
@pytest.mark.single_cpu
def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so):
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
tm.assert_equal(
read_csv(
f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so
@pytest.mark.single_cpu
@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so):
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
tm.assert_equal(
read_csv(
f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv",
@pytest.mark.single_cpu
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
def test_s3_parquet(s3_public_bucket, s3so, df1):
- pytest.importorskip("fastparquet")
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("fastparquet")
+ td.versioned_importorskip("s3fs")
fn = f"s3://{s3_public_bucket.name}/test.parquet"
df1.to_parquet(
def test_feather_options(fsspectest):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [0]})
df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"})
assert fsspectest.test[0] == "feather_write"
def test_markdown_options(fsspectest):
- pytest.importorskip("tabulate")
+ td.versioned_importorskip("tabulate")
df = DataFrame({"a": [0]})
df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"})
assert fsspectest.test[0] == "md_write"
def test_non_fsspec_options():
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
with pytest.raises(ValueError, match="storage_options"):
read_csv("localfile", storage_options={"a": True})
with pytest.raises(ValueError, match="storage_options"):
@pytest.fixture
def gcs_buffer():
"""Emulate GCS using a binary buffer."""
- pytest.importorskip("gcsfs")
- fsspec = pytest.importorskip("fsspec")
+ td.versioned_importorskip("gcsfs")
+ fsspec = td.versioned_importorskip("fsspec")
gcs_buffer = BytesIO()
gcs_buffer.close = lambda: True
df1.to_json(path)
df2 = read_json(path, convert_dates=["dt"])
elif format == "parquet":
- pytest.importorskip("pyarrow")
- pa_fs = pytest.importorskip("pyarrow.fs")
+ td.versioned_importorskip("pyarrow")
+ pa_fs = td.versioned_importorskip("pyarrow.fs")
class MockFileSystem(pa_fs.FileSystem):
@staticmethod
captured = capsys.readouterr()
assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n"
elif format == "markdown":
- pytest.importorskip("tabulate")
+ td.versioned_importorskip("tabulate")
df1.to_markdown(path)
df2 = df1
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- pytest.importorskip("fastparquet")
- pytest.importorskip("gcsfs")
+ td.versioned_importorskip("fastparquet")
+ td.versioned_importorskip("gcsfs")
from fsspec import AbstractFileSystem
def test_bs4_version_fails(monkeypatch, datapath):
- bs4 = pytest.importorskip("bs4")
- pytest.importorskip("html5lib")
+ bs4 = td.versioned_importorskip("bs4")
+ td.versioned_importorskip("html5lib")
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
def test_same_ordering(datapath):
- pytest.importorskip("bs4")
- pytest.importorskip("lxml")
- pytest.importorskip("html5lib")
+ td.versioned_importorskip("bs4")
+ td.versioned_importorskip("lxml")
+ td.versioned_importorskip("html5lib")
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
elif dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
else:
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
"Auth": "other_custom",
}
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
true_df = pd.DataFrame({"column_name": ["column_value"]})
msg = (
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import read_orc
import pandas._testing as tm
from pandas.core.arrays import StringArray
-pytest.importorskip("pyarrow.orc")
+td.versioned_importorskip("pyarrow.orc")
import pyarrow as pa
def test_orc_roundtrip_file(dirpath):
# GH44554
# PyArrow gained ORC write support with the current argument order
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
data = {
"boolean1": np.array([False, True], dtype="bool"),
def test_orc_roundtrip_bytesio():
# GH44554
# PyArrow gained ORC write support with the current argument order
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
data = {
"boolean1": np.array([False, True], dtype="bool"),
def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
# GH44554
# PyArrow gained ORC write support with the current argument order
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
msg = "The dtype of one or more columns is not supported yet."
with pytest.raises(NotImplementedError, match=msg):
def test_orc_dtype_backend_pyarrow():
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"string": list("abc"),
def test_orc_dtype_backend_numpy_nullable():
# GH#50503
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"string": list("abc"),
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_copy_on_write
from pandas._config.config import _get_option
@pytest.mark.single_cpu
def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine):
if engine != "auto":
- pytest.importorskip(engine)
+ td.versioned_importorskip(engine)
with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f:
httpserver.serve_content(content=f.read())
df = read_parquet(httpserver.url)
check_round_trip(df, engine)
def test_dtype_backend(self, engine, request):
- pq = pytest.importorskip("pyarrow.parquet")
+ pq = td.versioned_importorskip("pyarrow.parquet")
if engine == "fastparquet":
# We are manually disabling fastparquet's
@pytest.mark.single_cpu
def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so):
- s3fs = pytest.importorskip("s3fs")
+ s3fs = td.versioned_importorskip("s3fs")
s3 = s3fs.S3FileSystem(**s3so)
kw = {"filesystem": s3}
check_round_trip(
def test_s3_roundtrip_for_dir(
self, df_compat, s3_public_bucket, pa, partition_col, s3so
):
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
# GH #26388
expected_df = df_compat.copy()
)
def test_read_file_like_obj_support(self, df_compat):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
buffer = BytesIO()
df_compat.to_parquet(buffer)
df_from_buf = read_parquet(buffer)
tm.assert_frame_equal(df_compat, df_from_buf)
def test_expand_user(self, df_compat, monkeypatch):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
monkeypatch.setenv("HOME", "TestingUser")
monkeypatch.setenv("USERPROFILE", "TestingUser")
with pytest.raises(OSError, match=r".*TestingUser.*"):
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype="Int64"),
def test_pyarrow_backed_string_array(self, pa, string_storage):
# test ArrowStringArray supported through the __arrow_array__ protocol
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
with pd.option_context("string_storage", string_storage):
check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))
def test_additional_extension_types(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol + by defining a custom ExtensionType
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame(
{
"c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]),
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame({"a": list(range(3))})
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
def test_filesystem_notimplemented(self):
- pytest.importorskip("fastparquet")
+ td.versioned_importorskip("fastparquet")
df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
with tm.ensure_clean() as path:
with pytest.raises(
read_parquet(path, engine="fastparquet", filesystem="foo")
def test_invalid_filesystem(self):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
with tm.ensure_clean() as path:
with pytest.raises(
read_parquet(path, engine="pyarrow", filesystem="foo")
def test_unsupported_pa_filesystem_storage_options(self):
- pa_fs = pytest.importorskip("pyarrow.fs")
+ pa_fs = td.versioned_importorskip("pyarrow.fs")
df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
with tm.ensure_clean() as path:
with pytest.raises(
def test_pickle_fsspec_roundtrip():
- pytest.importorskip("fsspec")
+ td.versioned_importorskip("fsspec")
with tm.ensure_clean():
mockurl = "memory://mockfile"
df = DataFrame(
import pytest
+import pandas.util._test_decorators as td
from pandas import read_csv
def test_streaming_s3_objects():
# GH17135
# botocore gained iteration support in 1.10.47, can now be used in read_*
- pytest.importorskip("botocore", minversion="1.10.47")
+ td.versioned_importorskip("botocore", min_version="1.10.47")
from botocore.response import StreamingBody
data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"]
@pytest.mark.single_cpu
def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
# GH 34626
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
result = read_csv(
f"s3://{s3_public_bucket_with_data.name}/tips.csv",
nrows=3,
def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
# Ensure we can read from a public bucket with credentials
# GH 34626
- pytest.importorskip("s3fs")
+ td.versioned_importorskip("s3fs")
df = read_csv(
f"s3://{s3_public_bucket_with_data.name}/tips.csv",
nrows=5,
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version
-pyreadstat = pytest.importorskip("pyreadstat")
+pyreadstat = td.versioned_importorskip("pyreadstat")
# TODO(CoW) - detection of chained assignment in cython
expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64")
if dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
@pytest.fixture
def mysql_pymysql_engine():
- sqlalchemy = pytest.importorskip("sqlalchemy")
- pymysql = pytest.importorskip("pymysql")
+ sqlalchemy = td.versioned_importorskip("sqlalchemy")
+ pymysql = td.versioned_importorskip("pymysql")
engine = sqlalchemy.create_engine(
"mysql+pymysql://root@localhost:3306/pandas",
connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS},
@pytest.fixture
def postgresql_psycopg2_engine():
- sqlalchemy = pytest.importorskip("sqlalchemy")
- pytest.importorskip("psycopg2")
+ sqlalchemy = td.versioned_importorskip("sqlalchemy")
+ td.versioned_importorskip("psycopg2")
engine = sqlalchemy.create_engine(
"postgresql+psycopg2://postgres:postgres@localhost:5432/pandas",
poolclass=sqlalchemy.pool.NullPool,
@pytest.fixture
def postgresql_adbc_conn():
- pytest.importorskip("adbc_driver_postgresql")
+ td.versioned_importorskip("adbc_driver_postgresql")
from adbc_driver_postgresql import dbapi
uri = "postgresql://postgres:postgres@localhost:5432/pandas"
@pytest.fixture
def sqlite_str():
- pytest.importorskip("sqlalchemy")
+ td.versioned_importorskip("sqlalchemy")
with tm.ensure_clean() as name:
yield f"sqlite:///{name}"
@pytest.fixture
def sqlite_engine(sqlite_str):
- sqlalchemy = pytest.importorskip("sqlalchemy")
+ sqlalchemy = td.versioned_importorskip("sqlalchemy")
engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool)
yield engine
for view in get_all_views(engine):
@pytest.fixture
def sqlite_str_iris(sqlite_str, iris_path):
- sqlalchemy = pytest.importorskip("sqlalchemy")
+ sqlalchemy = td.versioned_importorskip("sqlalchemy")
engine = sqlalchemy.create_engine(sqlite_str)
create_and_load_iris(engine, iris_path)
create_and_load_iris_view(engine)
@pytest.fixture
def sqlite_str_types(sqlite_str, types_data):
- sqlalchemy = pytest.importorskip("sqlalchemy")
+ sqlalchemy = td.versioned_importorskip("sqlalchemy")
engine = sqlalchemy.create_engine(sqlite_str)
create_and_load_types(engine, types_data, "sqlite")
engine.dispose()
@pytest.fixture
def sqlite_adbc_conn():
- pytest.importorskip("adbc_driver_sqlite")
+ td.versioned_importorskip("adbc_driver_sqlite")
from adbc_driver_sqlite import dbapi
with tm.ensure_clean() as name:
@pytest.mark.parametrize("conn", all_connectable)
def test_dataframe_to_sql_arrow_dtypes(conn, request):
# GH 52046
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"int": pd.array([1], dtype="int8[pyarrow]"),
@pytest.mark.parametrize("conn", all_connectable)
def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture):
# GH 52046
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"datetime": pd.array(
@pytest.mark.parametrize("conn", all_connectable)
def test_database_uri_string(conn, request, test_frame1):
- pytest.importorskip("sqlalchemy")
+ td.versioned_importorskip("sqlalchemy")
conn = request.getfixturevalue(conn)
# Test read_sql and .to_sql method with a database URI (GH10654)
# db_uri = 'sqlite:///:memory:' # raises
@td.skip_if_installed("pg8000")
@pytest.mark.parametrize("conn", all_connectable)
def test_pg8000_sqlalchemy_passthrough_error(conn, request):
- pytest.importorskip("sqlalchemy")
+ td.versioned_importorskip("sqlalchemy")
conn = request.getfixturevalue(conn)
# using driver that will not be installed on CI to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
# The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
# for pymysql version >= 0.10
# TODO(GH#36465): remove this version check after GH 36465 is fixed
- pymysql = pytest.importorskip("pymysql")
+ pymysql = td.versioned_importorskip("pymysql")
if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns:
mark = pytest.mark.xfail(reason="GH 36465")
def test_options_get_engine():
- pytest.importorskip("sqlalchemy")
+ td.versioned_importorskip("sqlalchemy")
assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine)
with pd.option_context("io.sql.engine", "sqlalchemy"):
string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
elif dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) # type: ignore[assignment]
string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) # type: ignore[assignment]
else:
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
}
)
if dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
def test_read_sql_string_inference(sqlite_engine):
conn = sqlite_engine
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
table = "test"
df = DataFrame({"a": ["x", "y"]})
df.to_sql(table, con=conn, index=False, if_exists="replace")
with bz2.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "zstd":
- zstd = pytest.importorskip("zstandard")
+ zstd = td.versioned_importorskip("zstandard")
with zstd.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "xz":
- lzma = pytest.importorskip("lzma")
+ lzma = td.versioned_importorskip("lzma")
with lzma.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression is None:
def test_correct_encoding_file(xml_baby_names):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
@pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
def test_wrong_encoding_option_lxml(xml_baby_names, parser, encoding):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_file = read_xml(xml_baby_names, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
def test_xml_declaration_pretty_print(geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
expected = """\
<data>
<row>
def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with open(
xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
) as f:
def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
# note: By default the bodies of untyped functions are not checked,
# consider using --check-untyped-defs
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
with open(
def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with open(
xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
) as f:
def test_stylesheet_wrong_path(geom_df):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = os.path.join("data", "xml", "row_field_output.xslt")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_stylesheet(val, geom_df):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
msg = "|".join(
[
def test_incorrect_xsl_syntax(geom_df):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
def test_incorrect_xsl_eval(geom_df):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
def test_incorrect_xsl_apply(geom_df):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
def test_style_to_csv(geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
def test_style_to_string(geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
def test_style_to_json(geom_df):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
@pytest.mark.single_cpu
def test_s3_permission_output(parser, s3_public_bucket, geom_df):
- s3fs = pytest.importorskip("s3fs")
- pytest.importorskip("lxml")
+ s3fs = td.versioned_importorskip("s3fs")
+ td.versioned_importorskip("lxml")
with tm.external_error_raised((PermissionError, FileNotFoundError)):
fs = s3fs.S3FileSystem(anon=True)
def test_literal_xml_deprecation():
# GH 53809
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
msg = (
"Passing literal xml to 'read_xml' is deprecated and "
"will be removed in a future version. To read from a "
def test_parser_consistency_file(xml_books):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_file_lxml = read_xml(xml_books, parser="lxml")
df_file_etree = read_xml(xml_books, parser="etree")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_lxml(val):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
msg = "|".join(
[
@pytest.mark.network
@pytest.mark.single_cpu
def test_url(httpserver, xml_file):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with open(xml_file, encoding="utf-8") as f:
httpserver.serve_content(content=f.read())
df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]")
def test_empty_xpath_lxml(xml_books):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with pytest.raises(ValueError, match=("xpath does not return any nodes")):
read_xml(xml_books, xpath=".//python", parser="lxml")
def test_bad_xpath_lxml(xml_books):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
with pytest.raises(lxml_etree.XPathEvalError, match=("Invalid expression")):
read_xml(xml_books, xpath=".//[book]", parser="lxml")
def test_consistency_default_namespace():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_lxml = read_xml(
StringIO(xml_default_nmsp),
xpath=".//ns:row",
def test_consistency_prefix_namespace():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_lxml = read_xml(
StringIO(xml_prefix_nmsp),
xpath=".//doc:row",
def test_missing_prefix_definition_lxml(kml_cta_rail_lines):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
with pytest.raises(lxml_etree.XPathEvalError, match=("Undefined namespace prefix")):
read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml")
@pytest.mark.parametrize("key", ["", None])
def test_none_namespace_prefix(key):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with pytest.raises(
TypeError, match=("empty namespace prefix is not supported in XPath")
):
def test_attribute_centric_xml():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xml = """\
<?xml version="1.0" encoding="UTF-8"?>
<TrainSchedule>
def test_parser_consistency_with_encoding(xml_baby_names):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_xpath_lxml = read_xml(xml_baby_names, parser="lxml", encoding="ISO-8859-1")
df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1")
def test_wrong_encoding_for_lxml():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
# GH#45133
data = """<data>
<row>
def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
df_style = read_xml(
kml_cta_rail_lines,
xpath=".//k:Placemark",
def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
df_style = read_xml(
kml_cta_rail_lines,
def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode):
# note: By default the bodies of untyped functions are not checked,
# consider using --check-untyped-defs
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
xsl_obj = f.read()
def test_style_charset():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xml = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
xsl = """\
def test_not_stylesheet(kml_cta_rail_lines, xml_books):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
with pytest.raises(
lxml_etree.XSLTParseError, match=("document is not a stylesheet")
def test_incorrect_xsl_syntax(kml_cta_rail_lines):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
def test_incorrect_xsl_eval(kml_cta_rail_lines):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
def test_incorrect_xsl_apply(kml_cta_rail_lines):
- lxml_etree = pytest.importorskip("lxml.etree")
+ lxml_etree = td.versioned_importorskip("lxml.etree")
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path):
- xml_etree = pytest.importorskip("lxml.etree")
+ xml_etree = td.versioned_importorskip("lxml.etree")
xsl = xml_data_path / "flatten.xsl"
def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode):
# note: By default the bodies of untyped functions are not checked,
# consider using --check-untyped-defs
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f:
def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
with pytest.raises(
ValueError, match=("To use stylesheet, you need lxml installed")
):
@pytest.mark.parametrize("val", ["", b""])
def test_empty_stylesheet(val, datapath):
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
msg = (
"Passing literal xml to 'read_xml' is deprecated and "
"will be removed in a future version. To read from a "
def test_online_stylesheet():
- pytest.importorskip("lxml")
+ td.versioned_importorskip("lxml")
xml = """\
<?xml version="1.0" encoding="UTF-8"?>
<catalog>
@pytest.mark.network
@pytest.mark.single_cpu
def test_s3_parser_consistency(s3_public_bucket_with_data, s3so):
- pytest.importorskip("s3fs")
- pytest.importorskip("lxml")
+ td.versioned_importorskip("s3fs")
+ td.versioned_importorskip("lxml")
s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml"
df_lxml = read_xml(s3, parser="lxml", storage_options=s3so)
</data>"""
if using_infer_string:
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"]))
string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None]))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
elif dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
string_array = ArrowExtensionArray(pa.array(["x", "y"]))
string_array_na = ArrowExtensionArray(pa.array(["x", None]))
else:
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["x", "y"]))
string_array_na = ArrowStringArray(pa.array(["x", None]))
)
if dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
expected = DataFrame(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
to_datetime,
# 1) Resets units registry
# 2) Resets rc_context
# 3) Closes all figures
- mpl = pytest.importorskip("matplotlib")
- mpl_units = pytest.importorskip("matplotlib.units")
- plt = pytest.importorskip("matplotlib.pyplot")
+ mpl = td.versioned_importorskip("matplotlib")
+ mpl_units = td.versioned_importorskip("matplotlib.units")
+ plt = td.versioned_importorskip("matplotlib.pyplot")
orig_units_registry = mpl_units.registry.copy()
with mpl.rc_context():
mpl.use("template")
from pandas.io.formats.printing import pprint_thing
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
class TestDataFramePlots:
_check_box_return_type(result, return_type)
def test_kde_df(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
ax = _check_plot_works(df.plot, kind="kde")
expected = [pprint_thing(c) for c in df.columns]
_check_ticks_props(ax, xrot=0)
def test_kde_df_rot(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
ax = df.plot(kind="kde", rot=20, fontsize=5)
_check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
def test_kde_df_subplots(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
axes = _check_plot_works(
df.plot,
_check_axes_shape(axes, axes_num=4, layout=(4, 1))
def test_kde_df_logy(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
axes = df.plot(kind="kde", logy=True, subplots=True)
_check_ax_scales(axes, yaxis="log")
def test_kde_missing_vals(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind="kde")
@pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
def test_kind_both_ways(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"x": [1, 2, 3]})
df.plot(kind=kind)
getattr(df.plot, kind)()
@pytest.mark.parametrize("kind", ["scatter", "hexbin"])
def test_kind_both_ways_x_y(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"x": [1, 2, 3]})
df.plot("x", "x", kind=kind)
getattr(df.plot, kind)("x", "x")
@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds)
def test_memory_leak(self, kind):
"""Check that every plot type gets properly collected."""
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
args = {}
if kind in ["hexbin", "scatter", "pie"]:
df = DataFrame(
"kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie")
)
def test_group_subplot(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
d = {
"a": np.arange(10),
"b": np.arange(10) + 1,
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
)
from pandas.util.version import Version
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
-cm = pytest.importorskip("matplotlib.cm")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
+cm = td.versioned_importorskip("matplotlib.cm")
def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
_check_colors(ax.patches[::10], facecolors=["green"] * 5)
def test_kde_colors(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
custom_colors = "rgcby"
df = DataFrame(np.random.default_rng(2).random((5, 5)))
@pytest.mark.parametrize("colormap", ["jet", cm.jet])
def test_kde_colors_cmap(self, colormap):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
ax = df.plot.kde(colormap=colormap)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
_check_colors(ax.get_lines(), linecolors=rgba_colors)
def test_kde_colors_and_styles_subplots(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
default_colors = _unpack_cycler(mpl.pyplot.rcParams)
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
@pytest.mark.parametrize("colormap", ["k", "red"])
def test_kde_colors_and_styles_subplots_single_col_str(self, colormap):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
axes = df.plot(kind="kde", color=colormap, subplots=True)
for ax in axes:
_check_colors(ax.get_lines(), linecolors=[colormap])
def test_kde_colors_and_styles_subplots_custom_color(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
custom_colors = "rgcby"
axes = df.plot(kind="kde", color=custom_colors, subplots=True)
@pytest.mark.parametrize("colormap", ["jet", cm.jet])
def test_kde_colors_and_styles_subplots_cmap(self, colormap):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
axes = df.plot(kind="kde", colormap=colormap, subplots=True)
_check_colors(ax.get_lines(), linecolors=[c])
def test_kde_colors_and_styles_subplots_single_col(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
_check_colors(axes[0].lines, linecolors=["DodgerBlue"])
def test_kde_colors_and_styles_subplots_single_char(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# list of styles
# single character style
_check_colors(ax.get_lines(), linecolors=["r"])
def test_kde_colors_and_styles_subplots_list(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
# list of styles
styles = list("rgcby")
import pytest
+import pandas.util._test_decorators as td
from pandas import DataFrame
from pandas.tests.plotting.common import _check_visible
-pytest.importorskip("matplotlib")
+td.versioned_importorskip("matplotlib")
class TestDataFramePlotsGroupby:
)
from pandas.util.version import Version
-mpl = pytest.importorskip("matplotlib")
+mpl = td.versioned_importorskip("matplotlib")
class TestFrameLegend:
@pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"])
def test_df_legend_labels(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
df2 = DataFrame(
np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
_check_legend_labels(ax, labels=expected)
def test_df_legend_labels_secondary_y(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
df2 = DataFrame(
np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
def test_df_legend_labels_time_series(self):
# Time Series
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
def test_df_legend_labels_time_series_scatter(self):
# Time Series
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
_check_legend_labels(ax, labels=["data1", "data3"])
def test_df_legend_labels_time_series_no_mutate(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ind = date_range("1/1/2014", periods=3)
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
from numpy.testing import assert_array_almost_equal_nulp
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import is_platform_linux
from pandas.compat.numpy import np_version_gte1p24
from pandas.io.formats.printing import pprint_thing
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
class TestDataFramePlotsSubplots:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import (
get_y_axis,
)
-pytest.importorskip("matplotlib")
+td.versioned_importorskip("matplotlib")
@pytest.fixture
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
from pandas.io.formats.printing import pprint_thing
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
def _check_ax_limits(col, ax):
import pytest
+import pandas.util._test_decorators as td
from pandas import DataFrame
from pandas.tests.plotting.common import (
_check_plot_works,
_gen_two_subplots,
)
-plt = pytest.importorskip("matplotlib.pyplot")
+plt = td.versioned_importorskip("matplotlib.pyplot")
class TestCommon:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas._config.config as cf
from pandas._libs.tslibs import to_offset
# causing an improper skip
pass
-pytest.importorskip("matplotlib.pyplot")
-dates = pytest.importorskip("matplotlib.dates")
+td.versioned_importorskip("matplotlib.pyplot")
+dates = td.versioned_importorskip("matplotlib.dates")
@pytest.mark.single_cpu
assert subprocess.check_call(call) == 0
def test_registering_no_warning(self):
- plt = pytest.importorskip("matplotlib.pyplot")
+ plt = td.versioned_importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
plt.close()
def test_pandas_plots_register(self):
- plt = pytest.importorskip("matplotlib.pyplot")
+ plt = td.versioned_importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
# Set to the "warn" state, in case this isn't the first test run
with tm.assert_produces_warning(None) as w:
plt.close()
def test_matplotlib_formatters(self):
- units = pytest.importorskip("matplotlib.units")
+ units = td.versioned_importorskip("matplotlib.units")
# Can't make any assertion about the start state.
# We check that toggling converters off removes it, and toggling it
assert Timestamp in units.registry
def test_option_no_warning(self):
- pytest.importorskip("matplotlib.pyplot")
+ td.versioned_importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters", False)
- plt = pytest.importorskip("matplotlib.pyplot")
+ plt = td.versioned_importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
plt.close()
def test_registry_resets(self):
- units = pytest.importorskip("matplotlib.units")
- dates = pytest.importorskip("matplotlib.dates")
+ units = td.versioned_importorskip("matplotlib.units")
+ dates = td.versioned_importorskip("matplotlib.dates")
# make a copy, to reset to
original = dict(units.registry)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs.tslibs import (
BaseOffset,
to_offset,
from pandas.tseries.offsets import WeekOfMonth
-mpl = pytest.importorskip("matplotlib")
+mpl = td.versioned_importorskip("matplotlib")
class TestTSPlot:
assert ax.get_yaxis().get_visible()
def test_secondary_kde(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series(np.random.default_rng(2).standard_normal(10))
fig, ax = mpl.pyplot.subplots()
ax = ser.plot(secondary_y=True, kind="density", ax=ax)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
_check_legend_labels,
)
-pytest.importorskip("matplotlib")
+td.versioned_importorskip("matplotlib")
class TestDataFrameGroupByPlots:
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
get_y_axis,
)
-mpl = pytest.importorskip("matplotlib")
+mpl = td.versioned_importorskip("matplotlib")
@pytest.fixture
@pytest.mark.xfail(reason="Api changed in 3.6.0")
def test_hist_kde(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
_check_text_labels(ylabels, [""] * len(ylabels))
def test_hist_kde_plot_works(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_check_plot_works(ts.plot.kde)
def test_hist_kde_density_works(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_check_plot_works(ts.plot.density)
@pytest.mark.xfail(reason="Api changed in 3.6.0")
def test_hist_kde_logy(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
_check_text_labels(ylabels, [""] * len(ylabels))
def test_hist_kde_color_bins(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
_check_ax_scales(ax, yaxis="log")
_check_colors(ax.patches, facecolors=["b"] * 10)
def test_hist_kde_color(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, color="r", ax=ax)
_check_ax_scales(ax, yaxis="log")
def test_hist_with_nans_and_weights(self):
# GH 48884
- mpl_patches = pytest.importorskip("matplotlib.patches")
+ mpl_patches = td.versioned_importorskip("matplotlib.patches")
df = DataFrame(
[[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]],
columns=list("abc"),
_check_ticks_props,
)
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
-cm = pytest.importorskip("matplotlib.cm")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
+cm = td.versioned_importorskip("matplotlib.cm")
@pytest.fixture
class TestDataFramePlots:
@pytest.mark.parametrize("pass_axis", [False, True])
def test_scatter_matrix_axis(self, pass_axis):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
scatter_matrix = plotting.scatter_matrix
ax = None
@pytest.mark.parametrize("pass_axis", [False, True])
def test_scatter_matrix_axis_smaller(self, pass_axis):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
scatter_matrix = plotting.scatter_matrix
ax = None
get_y_axis,
)
-mpl = pytest.importorskip("matplotlib")
-plt = pytest.importorskip("matplotlib.pyplot")
+mpl = td.versioned_importorskip("matplotlib")
+plt = td.versioned_importorskip("matplotlib.pyplot")
@pytest.fixture
],
)
def test_kde_kwargs(self, ts, bw_method, ind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind)
def test_density_kwargs(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
def test_kde_kwargs_check_axes(self, ts):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_, ax = mpl.pyplot.subplots()
sample_points = np.linspace(-100, 100, 20)
ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
_check_text_labels(ax.yaxis.get_label(), "Density")
def test_kde_missing_vals(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(np.random.default_rng(2).uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
)
def test_kind_kwarg(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(range(3))
_, ax = mpl.pyplot.subplots()
s.plot(kind=kind, ax=ax)
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
)
def test_kind_attr(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(range(3))
_, ax = mpl.pyplot.subplots()
getattr(s.plot, kind)()
@pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
def test_valid_object_plot(self, kind):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(range(10), dtype=object)
_check_plot_works(s.plot, kind=kind)
@pytest.mark.slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
_check_grid_settings(
Series([1, 2, 3]),
plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
import pytest
+import pandas.util._test_decorators as td
from pandas import Series
-pytest.importorskip("matplotlib")
+td.versioned_importorskip("matplotlib")
from pandas.plotting._matplotlib.style import get_standard_colors
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
def test_any_all_pyarrow_string(self):
# GH#54591
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series(["", "a"], dtype="string[pyarrow_numpy]")
assert ser.any()
assert not ser.all()
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
assert pd.isna(result)
def test_skew(self):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
string_series = Series(range(20), dtype=np.float64, name="series")
assert (df.skew() == 0).all()
def test_kurt(self):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
string_series = Series(range(20), dtype=np.float64, name="series")
def test_resample_dtype_coercion(unit):
- pytest.importorskip("scipy.interpolate")
+ td.versioned_importorskip("scipy.interpolate")
# GH 16361
df = {"a": [1, 3, 1, 4]}
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_object_dtype,
is_string_dtype,
@pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"])
def test_merge_arrow_and_numpy_dtypes(dtype):
# GH#52406
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame({"a": [1, 2]}, dtype=dtype)
df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]")
result = df.merge(df2)
def test_merge_arrow_string_index(any_string_dtype):
# GH#54894
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype)
right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype))
result = left.merge(right, left_on="a", right_index=True, how="left")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
def test_wide_to_long_pyarrow_string_columns():
# GH 57066
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
df = DataFrame(
{
"ID": {0: 1},
import pytest
+import pandas.util._test_decorators as td
from pandas import (
ArrowDtype,
Series,
)
import pandas._testing as tm
-pa = pytest.importorskip("pyarrow")
+pa = td.versioned_importorskip("pyarrow")
from pandas.compat import pa_version_under11p0
import pytest
+import pandas.util._test_decorators as td
from pandas.compat.pyarrow import (
pa_version_under11p0,
pa_version_under13p0,
)
import pandas._testing as tm
-pa = pytest.importorskip("pyarrow")
-pc = pytest.importorskip("pyarrow.compute")
+pa = td.versioned_importorskip("pyarrow")
+pc = td.versioned_importorskip("pyarrow.compute")
def test_struct_accessor_dtypes():
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs import lib
import pandas as pd
def test_convert_dtypes_pyarrow_to_np_nullable(self):
# GH 53648
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = pd.Series(range(2), dtype="int32[pyarrow]")
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
expected = pd.Series(range(2), dtype="Int32")
def test_convert_dtypes_pyarrow_null(self):
# GH#55346
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
ser = pd.Series([None, None])
result = ser.convert_dtypes(dtype_backend="pyarrow")
expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null()))
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Series,
class TestSeriesCorr:
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_corr(self, datetime_series, dtype):
- stats = pytest.importorskip("scipy.stats")
+ stats = td.versioned_importorskip("scipy.stats")
datetime_series = datetime_series.astype(dtype)
tm.assert_almost_equal(result, expected)
def test_corr_rank(self):
- stats = pytest.importorskip("scipy.stats")
+ stats = td.versioned_importorskip("scipy.stats")
# kendall and spearman
A = Series(
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
tm.assert_series_equal(result, expected)
def test_duplicated_arrow_dtype(self):
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([True, False, None, False], dtype="bool[pyarrow]")
result = ser.drop_duplicates()
expected = Series([True, False, None], dtype="bool[pyarrow]")
def test_drop_duplicates_arrow_strings(self):
# GH#54904
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
ser = Series(["a", "a"], dtype=pd.ArrowDtype(pa.string()))
result = ser.drop_duplicates()
expecetd = Series(["a"], dtype=pd.ArrowDtype(pa.string()))
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("ignore_index", [True, False])
def test_explode_pyarrow_list_type(ignore_index):
# GH 53602
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
data = [
[None, None],
@pytest.mark.parametrize("ignore_index", [True, False])
def test_explode_pyarrow_non_list_type(ignore_index):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
data = [1, 2, 3]
ser = pd.Series(data, dtype=pd.ArrowDtype(pa.int64()))
result = ser.explode(ignore_index=ignore_index)
non_ts.interpolate(method="time")
def test_interpolate_cubicspline(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
tm.assert_series_equal(result, expected)
def test_interpolate_pchip(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))
# interpolate at new_index
interp_s.loc[49:51]
def test_interpolate_akima(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
tm.assert_series_equal(interp_s.loc[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
tm.assert_series_equal(interp_s.loc[1:3], expected)
def test_interpolate_from_derivatives(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series([10, 11, 12, 13])
expected = Series(
tm.assert_series_equal(result, expected)
def test_interp_quad(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
tm.assert_series_equal(result, expected)
def test_interp_all_good(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, 2, 3])
result = s.interpolate(method="polynomial", order=1)
tm.assert_series_equal(result, s)
s.interpolate(method="polynomial", order=1)
def test_interp_nonmono_raise(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, np.nan, 3], index=[0, 2, 1])
msg = "krogh interpolation requires that the index be monotonic"
with pytest.raises(ValueError, match=msg):
@pytest.mark.parametrize("method", ["nearest", "pad"])
def test_interp_datetime64(self, method, tz_naive_fixture):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = Series(
[1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
)
@pytest.mark.parametrize("method", ["polynomial", "spline"])
def test_no_order(self, method):
# see GH-10633, GH-24014
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "You must specify the order of the spline or polynomial"
with pytest.raises(ValueError, match=msg):
@pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])
def test_interpolate_spline_invalid_order(self, order):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([0, 1, np.nan, 3])
msg = "order needs to be specified and greater than 0"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="spline", order=order)
def test_spline(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method="spline", order=1)
expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
tm.assert_series_equal(result, expected)
def test_spline_extrapolate(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method="spline", order=1, ext=3)
expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])
tm.assert_series_equal(result1, expected1)
def test_spline_smooth(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (
s.interpolate(method="spline", order=3, s=0)[5]
def test_spline_interpolation(self):
# Explicit cast to float to avoid implicit cast when setting np.nan
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(np.arange(10) ** 2, dtype="float")
s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan
result1 = s.interpolate(method="spline", order=1)
method, kwargs = interp_methods_ind
if method == "pchip":
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
if method == "linear":
result = df[0].interpolate(**kwargs)
are tested here.
"""
# gh 21662
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ind = pd.timedelta_range(start=1, periods=4)
df = pd.DataFrame([0, 1, np.nan, 3], index=ind)
def test_interpolate_fill_value(self):
# GH#54920
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])
result = ser.interpolate(method="nearest", fill_value=0)
expected = Series([np.nan, 0, 1, 1, 3, 0])
class TestSeriesRank:
def test_rank(self, datetime_series):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
datetime_series[::2] = np.nan
datetime_series[:10:3] = 4.0
def test_rank_tie_methods_on_infs_nans(
self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf
):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
if dtype == "float64[pyarrow]":
if method == "average":
exp_dtype = "float64[pyarrow]"
],
)
def test_rank_methods_series(self, method, op, value):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
xs = np.random.default_rng(2).standard_normal(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
def test_reset_index_drop_infer_string(self):
# GH#56160
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series(["a", "b", "c"], dtype=object)
with option_context("future.infer_string", True):
result = ser.reset_index(drop=True)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
def test_inspect_getmembers(self):
# GH38782
- pytest.importorskip("jinja2")
+ td.versioned_importorskip("jinja2")
ser = Series(dtype=object)
msg = "Series._data is deprecated"
with tm.assert_produces_warning(
def test_series_string_inference(self):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
expected = Series(["a", "b"], dtype=dtype)
with pd.option_context("future.infer_string", True):
@pytest.mark.parametrize("na_value", [None, np.nan, pd.NA])
def test_series_string_with_na_inference(self, na_value):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype = "string[pyarrow_numpy]"
expected = Series(["a", na_value], dtype=dtype)
with pd.option_context("future.infer_string", True):
def test_series_string_inference_scalar(self):
# GH#54430
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = Series("a", index=[1], dtype="string[pyarrow_numpy]")
with pd.option_context("future.infer_string", True):
ser = Series("a", index=[1])
def test_series_string_inference_array_string_dtype(self):
# GH#54496
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
with pd.option_context("future.infer_string", True):
ser = Series(np.array(["a", "b"]))
def test_series_string_inference_storage_definition(self):
# GH#54793
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = Series(["a", "b"], dtype="string[pyarrow_numpy]")
with pd.option_context("future.infer_string", True):
result = Series(["a", "b"], dtype="string")
def test_series_string_inference_na_first(self):
# GH#55655
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = Series([pd.NA, "b"], dtype="string[pyarrow_numpy]")
with pd.option_context("future.infer_string", True):
result = Series([pd.NA, "b"])
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._config import using_pyarrow_string_dtype
import pandas as pd
repr(ts2).splitlines()[-1]
def test_latex_repr(self):
- pytest.importorskip("jinja2") # uses Styler implementation
+ td.versioned_importorskip("jinja2") # uses Styler implementation
result = r"""\begin{tabular}{ll}
\toprule
& 0 \\
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
def test_pyarrow_numpy_string_invalid(self):
# GH#56008
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([False, True])
ser2 = Series(["a", "b"], dtype="string[pyarrow_numpy]")
result = ser == ser2
import numpy as np
import pytest
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import Series
import pandas._testing as tm
def test_mode_infer_string():
# GH#56183
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series(["a", "b"], dtype=object)
with pd.option_context("future.infer_string", True):
result = ser.mode()
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import ArrowDtype
from pandas import (
def test_extractall_preserves_dtype():
# Ensure that when extractall is called on a series with specific dtypes set, that
# the dtype is preserved in the resulting DataFrame's column.
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)")
assert result.dtypes[0] == "string[pyarrow]"
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas._libs import (
algos as libalgos,
hashtable as ht,
],
)
def test_scipy_compat(self, arr):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
arr = np.array(arr)
olduse = pd.get_option("compute.use_numexpr")
try:
- pytest.importorskip("toolz")
- dd = pytest.importorskip("dask.dataframe")
+ td.versioned_importorskip("toolz")
+ dd = td.versioned_importorskip("dask.dataframe")
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
olduse = pd.get_option("compute.use_numexpr")
try:
- da = pytest.importorskip("dask.array")
- dd = pytest.importorskip("dask.dataframe")
+ da = td.versioned_importorskip("dask.array")
+ dd = td.versioned_importorskip("dask.dataframe")
s = Series([1.5, 2.3, 3.7, 4.0])
ds = dd.from_pandas(s, npartitions=2)
def test_construct_dask_float_array_int_dtype_match_ndarray():
# GH#40110 make sure we treat a float-dtype dask array with the same
# rules we would for an ndarray
- dd = pytest.importorskip("dask.dataframe")
+ dd = td.versioned_importorskip("dask.dataframe")
arr = np.array([1, 2.5, 3])
darr = dd.from_array(arr)
def test_xarray(df):
- pytest.importorskip("xarray")
+ td.versioned_importorskip("xarray")
assert df.to_xarray() is not None
def test_xarray_cftimeindex_nearest():
# https://github.com/pydata/xarray/issues/3751
- cftime = pytest.importorskip("cftime")
- xarray = pytest.importorskip("xarray")
+ cftime = td.versioned_importorskip("cftime")
+ xarray = td.versioned_importorskip("xarray")
times = xarray.cftime_range("0001", periods=2)
key = cftime.DatetimeGregorian(2000, 1, 1)
def test_statsmodels():
- smf = pytest.importorskip("statsmodels.formula.api")
+ smf = td.versioned_importorskip("statsmodels.formula.api")
df = DataFrame(
{"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)}
def test_scikit_learn():
- pytest.importorskip("sklearn")
+ td.versioned_importorskip("sklearn")
from sklearn import (
datasets,
svm,
def test_seaborn():
- seaborn = pytest.importorskip("seaborn")
+ seaborn = td.versioned_importorskip("seaborn")
tips = DataFrame(
{"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)}
)
def test_pandas_datareader():
- pytest.importorskip("pandas_datareader")
+ td.versioned_importorskip("pandas_datareader")
@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")
def test_pyarrow(df):
- pyarrow = pytest.importorskip("pyarrow")
+ pyarrow = td.versioned_importorskip("pyarrow")
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_yaml_dump(df):
# GH#42748
- yaml = pytest.importorskip("yaml")
+ yaml = td.versioned_importorskip("yaml")
dumped = yaml.dump(df)
olduse = pd.get_option("compute.use_numexpr")
try:
- da = pytest.importorskip("dask.array")
+ da = td.versioned_importorskip("dask.array")
dda = da.array([1, 2])
df = DataFrame({"a": ["a", "b"]})
Full testing is done at https://github.com/data-apis/dataframe-api-compat,
this is just to check that the entry point works as expected.
"""
- pytest.importorskip("dataframe_api_compat")
+ td.versioned_importorskip("dataframe_api_compat")
df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df = df_pd.__dataframe_consortium_standard__()
result_1 = df.get_column_names()
def test_xarray_coerce_unit():
# GH44053
- xr = pytest.importorskip("xarray")
+ xr = td.versioned_importorskip("xarray")
arr = xr.DataArray([1, 2, 3])
result = pd.to_datetime(arr, unit="ns")
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof, skipna):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
with np.errstate(invalid="ignore"):
self.check_funs(
return result
def test_nanskew(self, skipna):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
with np.errstate(invalid="ignore"):
)
def test_nankurt(self, skipna):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
func1 = partial(sp_stats.kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_kendall(self):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
def test_nancorr_spearman(self):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
def test_invalid_method(self):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
import pytest
+import pandas.util._test_decorators as td
from pandas.compat._optional import (
VERSIONS,
import_optional_dependency,
def test_xlrd_version_fallback():
- pytest.importorskip("xlrd")
+ td.versioned_importorskip("xlrd")
import_optional_dependency("xlrd")
@pytest.mark.parametrize("utc", [True, False])
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_arrow(self, tz, utc, arg_class):
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
dti = arg_class(dti)
def test_to_datetime_tz_psycopg2(self, request, cache):
# xref 8260
- psycopg2_tz = pytest.importorskip("psycopg2.tz")
+ psycopg2_tz = td.versioned_importorskip("psycopg2.tz")
# misc cases
tz1 = psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None)
def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
# GH 52425
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
result = to_datetime(ser)
expected = Series([1, 2], dtype="datetime64[ns]")
def test_to_numeric_dtype_backend_na(val, dtype):
# GH#50505
if "pyarrow" in dtype:
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype_backend = "pyarrow"
else:
dtype_backend = "numpy_nullable"
def test_to_numeric_dtype_backend_downcasting(val, dtype, downcast):
# GH#50505
if "pyarrow" in dtype:
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
dtype_backend = "pyarrow"
else:
dtype_backend = "numpy_nullable"
def test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend):
# GH#50505
if dtype_backend == "pyarrow":
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([1, pd.NA], dtype="UInt64")
result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned")
expected = Series([1, pd.NA], dtype=smaller)
def test_to_numeric_dtype_backend_already_nullable(dtype):
# GH#50505
if "pyarrow" in dtype:
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([1, pd.NA], dtype=dtype)
result = to_numeric(ser, dtype_backend="numpy_nullable")
expected = Series([1, pd.NA], dtype=dtype)
def test_coerce_pyarrow_backend():
# GH 52588
- pa = pytest.importorskip("pyarrow")
+ pa = td.versioned_importorskip("pyarrow")
ser = Series(list("12x"), dtype=ArrowDtype(pa.string()))
result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow")
expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64()))
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas.compat import IS64
from pandas.errors import OutOfBoundsTimedelta
def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
# GH 52425
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
result = to_timedelta(ser)
expected = Series([1, 2], dtype="timedelta64[ns]")
@pytest.mark.parametrize("unit", ["ns", "ms"])
def test_from_timedelta_arrow_dtype(unit):
# GH 54298
- pytest.importorskip("pyarrow")
+ td.versioned_importorskip("pyarrow")
expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]")
result = to_timedelta(expected)
tm.assert_series_equal(result, expected)
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
pytestmark = pytest.mark.single_cpu
-pytest.importorskip("numba")
+td.versioned_importorskip("numba")
@pytest.mark.filterwarnings("ignore")
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_series(series, sp_func, roll_func):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
compare_func = partial(getattr(sp_stats, sp_func), bias=False)
result = getattr(series.rolling(50), roll_func)()
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_frame(raw, frame, sp_func, roll_func):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
compare_func = partial(getattr(sp_stats, sp_func), bias=False)
result = getattr(frame.rolling(50), roll_func)()
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_series(series, sp_func, roll_func):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
compare_func = partial(getattr(sp_stats, sp_func), bias=False)
win = 25
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_frame(raw, frame, sp_func, roll_func):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
compare_func = partial(getattr(sp_stats, sp_func), bias=False)
win = 25
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_nans(sp_func, roll_func):
- sp_stats = pytest.importorskip("scipy.stats")
+ sp_stats = td.versioned_importorskip("scipy.stats")
compare_func = partial(getattr(sp_stats, sp_func), bias=False)
obj = Series(np.random.default_rng(2).standard_normal(50))
import numpy as np
import pytest
+import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
def test_constructor(frame_or_series):
# GH 12669
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
c = frame_or_series(range(5)).rolling
# valid
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
def test_invalid_constructor(frame_or_series, w):
# not valid
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="min_periods must be an integer"):
c(win_type="boxcar", window=2, min_periods=w)
@pytest.mark.parametrize("wt", ["foobar", 1])
def test_invalid_constructor_wintype(frame_or_series, wt):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
c = frame_or_series(range(5)).rolling
with pytest.raises(ValueError, match="Invalid win_type"):
c(win_type=wt, window=2)
def test_constructor_with_win_type(frame_or_series, win_types):
# GH 12669
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
c = frame_or_series(range(5)).rolling
c(win_type=win_types, window=2)
@pytest.mark.parametrize("arg", ["median", "kurt", "skew"])
def test_agg_function_support(arg):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame({"A": np.arange(5)})
roll = df.rolling(2, win_type="triang")
def test_invalid_scipy_arg():
# This error is raised by scipy
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
msg = r"boxcar\(\) got an unexpected"
with pytest.raises(TypeError, match=msg):
Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")
def test_constructor_with_win_type_invalid(frame_or_series):
# GH 13383
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
c = frame_or_series(range(5)).rolling
msg = "window must be an integer 0 or greater"
def test_window_with_args(step):
# make sure that we are aggregating window functions correctly with arg
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
r = Series(np.random.default_rng(2).standard_normal(100)).rolling(
window=10, min_periods=1, win_type="gaussian", step=step
)
def test_win_type_with_method_invalid():
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
with pytest.raises(
NotImplementedError, match="'single' is the only supported method type."
):
@pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")])
def test_consistent_win_type_freq(arg):
# GH 15969
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
s = Series(range(1))
with pytest.raises(ValueError, match="Invalid win_type freq"):
s.rolling(arg, win_type="freq")
def test_win_type_not_implemented():
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed, step):
def test_cmov_mean(step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, center=True, step=step).mean()
expected_values = [
def test_cmov_window(step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean()
expected_values = [
def test_cmov_window_corner(step):
# GH 8238
# all nan
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()
assert np.isnan(result).all()
)
def test_cmov_window_frame(f, xp, step):
# Gh 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(
np.array(
[
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5])
def test_cmov_window_na_min_periods(step, min_periods):
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = Series(np.random.default_rng(2).standard_normal(10))
vals[4] = np.nan
vals[8] = np.nan
def test_cmov_window_regular(win_types, step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"hamming": [
def test_cmov_window_regular_linear_range(win_types, step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
def test_cmov_window_regular_missing_data(win_types, step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
def test_cmov_window_special(win_types_special, step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
def test_cmov_window_special_linear_range(win_types_special, step):
# GH 8238
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
def test_weighted_var_big_window_no_segfault(win_types, center):
# GitHub Issue #46772
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
x = Series(0)
result = x.rolling(window=16, center=center, win_type=win_types).var()
expected = Series(np.nan)
def test_rolling_center_axis_1():
- pytest.importorskip("scipy")
+ td.versioned_importorskip("scipy")
df = DataFrame(
{"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]}
)
The mark can be used as either a decorator for a test class or to be
applied to parameters in pytest.mark.parametrize calls or parametrized
- fixtures. Use pytest.importorskip if an imported moduled is later needed
- or for test functions.
+ fixtures. Use td.versioned_importorskip when the imported module is
+ needed later, or inside test functions.
If the import and version check are unsuccessful, then the test function
(or test case when used in conjunction with parametrization) will be
get_option("mode.copy_on_write") is True,
reason="Test not valid for Copy-on-Write mode",
)
+
+def versioned_importorskip(*args, **kwargs):
+ """
+ (warning: this helper is currently Debian-specific; the name may change if upstream requests it)
+
+ Return the requested module, or skip the test if it is
+ not available in a new enough version.
+
+ Intended as a replacement for pytest.importorskip that by default
+ requires at least pandas' minimum supported version of the
+ optional dependency, rather than accepting any version.
+
+ See import_optional_dependency for full parameter documentation.
+ """
+ try:
+ module = import_optional_dependency(*args, **kwargs)
+ except ImportError as exc:
+ pytest.skip(str(exc), allow_module_level=True)
+ return module
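For reference, a minimal sketch of how a converted test module is expected to use the helper, assuming the same td alias used throughout this patch; the test name and data below are illustrative only and are not part of the patch:

import numpy as np

import pandas.util._test_decorators as td

from pandas import Series

# Module-level skip: requires matplotlib at pandas' minimum supported
# version (pytest.importorskip would have accepted any installed version).
plt = td.versioned_importorskip("matplotlib.pyplot")


def test_density_plot_smoke():
    # Per-test skip: the kde/density plot path also needs scipy.
    td.versioned_importorskip("scipy")
    ser = Series(np.random.default_rng(2).standard_normal(10))
    _, ax = plt.subplots()
    ser.plot(kind="density", ax=ax)

Like pytest.importorskip, the helper skips rather than fails when the dependency is absent; by delegating to import_optional_dependency it additionally skips when the installed version is older than pandas' declared minimum.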