From 96329f117b508d5de310bf630134e87b2650aba3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Tue, 24 Jun 2025 16:57:01 -0400 Subject: [PATCH 1/2] GH851 Change use of NoDefault to make it more private --- pandas-stubs/_libs/lib.pyi | 2 +- pandas-stubs/api/typing/__init__.pyi | 2 +- pandas-stubs/core/frame.pyi | 66 +++++++++++++-------------- pandas-stubs/core/generic.pyi | 4 +- pandas-stubs/core/groupby/groupby.pyi | 34 +++++++++----- pandas-stubs/core/groupby/grouper.pyi | 4 +- pandas-stubs/core/series.pyi | 22 ++++----- pandas-stubs/core/tools/numeric.pyi | 8 ++-- pandas-stubs/io/clipboards.pyi | 8 ++-- pandas-stubs/io/excel/_base.pyi | 10 ++-- pandas-stubs/io/feather_format.pyi | 4 +- pandas-stubs/io/html.pyi | 4 +- pandas-stubs/io/json/_json.pyi | 18 ++++---- pandas-stubs/io/orc.pyi | 4 +- pandas-stubs/io/parsers/readers.pyi | 14 +++--- pandas-stubs/io/spss.pyi | 4 +- pandas-stubs/io/sql.pyi | 14 +++--- pandas-stubs/io/xml.pyi | 4 +- 18 files changed, 119 insertions(+), 107 deletions(-) diff --git a/pandas-stubs/_libs/lib.pyi b/pandas-stubs/_libs/lib.pyi index 7622a13a5..e06eda996 100644 --- a/pandas-stubs/_libs/lib.pyi +++ b/pandas-stubs/_libs/lib.pyi @@ -14,7 +14,7 @@ class _NoDefault(Enum): no_default = ... no_default: Final = _NoDefault.no_default -NoDefault: TypeAlias = Literal[_NoDefault.no_default] +NoDefaultDoNotUse: TypeAlias = Literal[_NoDefault.no_default] def infer_dtype(value: object, skipna: bool = ...) -> str: ... def is_iterator(obj: object) -> bool: ... diff --git a/pandas-stubs/api/typing/__init__.pyi b/pandas-stubs/api/typing/__init__.pyi index 1e3ae7140..f2259d219 100644 --- a/pandas-stubs/api/typing/__init__.pyi +++ b/pandas-stubs/api/typing/__init__.pyi @@ -21,7 +21,7 @@ from pandas.core.window import ( ) from pandas._libs import NaTType as NaTType -from pandas._libs.lib import NoDefault as NoDefault +from pandas._libs.lib import NoDefaultDoNotUse as NoDefaultDoNotUse from pandas._libs.missing import NAType as NAType from pandas.io.json._json import JsonReader as JsonReader diff --git a/pandas-stubs/core/frame.pyi b/pandas-stubs/core/frame.pyi index 90560dc2a..ae640b4e3 100644 --- a/pandas-stubs/core/frame.pyi +++ b/pandas-stubs/core/frame.pyi @@ -72,7 +72,7 @@ from typing_extensions import ( ) import xarray as xr -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._libs.missing import NAType from pandas._libs.tslibs import BaseOffset from pandas._libs.tslibs.nattype import NaTType @@ -1107,192 +1107,192 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack): def groupby( # pyright: ignore reportOverlappingOverload self, by: Scalar, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Scalar, Literal[True]]: ... @overload def groupby( self, by: Scalar, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Scalar, Literal[False]]: ... 
@overload def groupby( # pyright: ignore reportOverlappingOverload self, by: DatetimeIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timestamp, Literal[True]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: DatetimeIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timestamp, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: TimedeltaIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timedelta, Literal[True]]: ... @overload def groupby( self, by: TimedeltaIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timedelta, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: PeriodIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Period, Literal[True]]: ... @overload def groupby( self, by: PeriodIndex, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Period, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: IntervalIndex[IntervalT], - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[IntervalT, Literal[True]]: ... @overload def groupby( self, by: IntervalIndex[IntervalT], - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[IntervalT, Literal[False]]: ... 
@overload def groupby( # type: ignore[overload-overlap] # pyright: ignore reportOverlappingOverload self, by: MultiIndex | GroupByObjectNonScalar | None = ..., - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[tuple, Literal[True]]: ... @overload def groupby( # type: ignore[overload-overlap] self, by: MultiIndex | GroupByObjectNonScalar | None = ..., - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[tuple, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: Series[SeriesByT], - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[SeriesByT, Literal[True]]: ... @overload def groupby( self, by: Series[SeriesByT], - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[SeriesByT, Literal[False]]: ... @overload def groupby( self, by: CategoricalIndex | Index | Series, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Any, Literal[True]]: ... @overload def groupby( self, by: CategoricalIndex | Index | Series, - axis: AxisIndex | NoDefault = ..., + axis: AxisIndex | NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Any, Literal[False]]: ... 
def pivot( diff --git a/pandas-stubs/core/generic.pyi b/pandas-stubs/core/generic.pyi index edcc45d8b..e3257a20f 100644 --- a/pandas-stubs/core/generic.pyi +++ b/pandas-stubs/core/generic.pyi @@ -32,7 +32,7 @@ from typing_extensions import ( Self, ) -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( Axis, CompressionOptions, @@ -404,7 +404,7 @@ class NDFrame(indexing.IndexingMixin): def resample( self, rule: Frequency | dt.timedelta, - axis: Axis | NoDefault = ..., + axis: Axis | NoDefaultDoNotUse = ..., closed: Literal["right", "left"] | None = ..., label: Literal["right", "left"] | None = ..., convention: ToTimestampHow = ..., diff --git a/pandas-stubs/core/groupby/groupby.pyi b/pandas-stubs/core/groupby/groupby.pyi index c7af2d4fa..c0cfec39e 100644 --- a/pandas-stubs/core/groupby/groupby.pyi +++ b/pandas-stubs/core/groupby/groupby.pyi @@ -46,7 +46,7 @@ from typing_extensions import ( TypeAlias, ) -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._libs.tslibs import BaseOffset from pandas._typing import ( S1, @@ -283,39 +283,51 @@ class GroupBy(BaseGroupBy[NDFrameT]): ascending: bool = ..., na_option: str = ..., pct: bool = ..., - axis: AxisInt | NoDefault = ..., + axis: AxisInt | NoDefaultDoNotUse = ..., ) -> NDFrameT: ... @final - def cumprod(self, axis: Axis | NoDefault = ..., *args, **kwargs) -> NDFrameT: ... + def cumprod( + self, axis: Axis | NoDefaultDoNotUse = ..., *args, **kwargs + ) -> NDFrameT: ... @final - def cumsum(self, axis: Axis | NoDefault = ..., *args, **kwargs) -> NDFrameT: ... + def cumsum( + self, axis: Axis | NoDefaultDoNotUse = ..., *args, **kwargs + ) -> NDFrameT: ... @final def cummin( - self, axis: AxisInt | NoDefault = ..., numeric_only: bool = ..., **kwargs + self, + axis: AxisInt | NoDefaultDoNotUse = ..., + numeric_only: bool = ..., + **kwargs, ) -> NDFrameT: ... @final def cummax( - self, axis: AxisInt | NoDefault = ..., numeric_only: bool = ..., **kwargs + self, + axis: AxisInt | NoDefaultDoNotUse = ..., + numeric_only: bool = ..., + **kwargs, ) -> NDFrameT: ... @final def shift( self, periods: int | Sequence[int] = ..., freq: Frequency | None = ..., - axis: Axis | NoDefault = ..., + axis: Axis | NoDefaultDoNotUse = ..., fill_value=..., suffix: str | None = ..., ) -> NDFrameT: ... @final - def diff(self, periods: int = ..., axis: AxisInt | NoDefault = ...) -> NDFrameT: ... + def diff( + self, periods: int = ..., axis: AxisInt | NoDefaultDoNotUse = ... + ) -> NDFrameT: ... @final def pct_change( self, periods: int = ..., - fill_method: Literal["bfill", "ffill"] | None | NoDefault = ..., - limit: int | None | NoDefault = ..., + fill_method: Literal["bfill", "ffill"] | None | NoDefaultDoNotUse = ..., + limit: int | None | NoDefaultDoNotUse = ..., freq=..., - axis: Axis | NoDefault = ..., + axis: Axis | NoDefaultDoNotUse = ..., ) -> NDFrameT: ... @final def head(self, n: int = ...) -> NDFrameT: ... 
diff --git a/pandas-stubs/core/groupby/grouper.pyi b/pandas-stubs/core/groupby/grouper.pyi index 8117f0254..d8a604da8 100644 --- a/pandas-stubs/core/groupby/grouper.pyi +++ b/pandas-stubs/core/groupby/grouper.pyi @@ -16,7 +16,7 @@ from pandas import ( from pandas.core.resample import TimeGrouper from typing_extensions import Self -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( ArrayLike, Axis, @@ -42,7 +42,7 @@ class Grouper: cls, key: KeysArgType | None = ..., level: Level | ListLikeHashable[Level] | None = ..., - axis: Axis | NoDefault = ..., + axis: Axis | NoDefaultDoNotUse = ..., sort: bool = ..., dropna: bool = ..., ) -> Self: ... diff --git a/pandas-stubs/core/series.pyi b/pandas-stubs/core/series.pyi index 1982d54bc..92b36d0d9 100644 --- a/pandas-stubs/core/series.pyi +++ b/pandas-stubs/core/series.pyi @@ -96,7 +96,7 @@ from pandas._libs.interval import ( Interval, _OrderableT, ) -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._libs.missing import NAType from pandas._libs.tslibs import BaseOffset from pandas._libs.tslibs.nattype import NaTType @@ -629,7 +629,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... @overload @@ -641,7 +641,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Timestamp]: ... @overload @@ -653,7 +653,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Timedelta]: ... @overload @@ -665,7 +665,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Period]: ... @overload @@ -677,7 +677,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, IntervalT]: ... @overload @@ -689,7 +689,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, tuple]: ... @overload @@ -701,7 +701,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... @overload @@ -714,7 +714,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... 
@overload @@ -726,7 +726,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, SeriesByT]: ... @overload @@ -738,7 +738,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefault = ..., + observed: _bool | NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Any]: ... # need the ignore because None is Hashable diff --git a/pandas-stubs/core/tools/numeric.pyi b/pandas-stubs/core/tools/numeric.pyi index b38c3ba06..66e04d95f 100644 --- a/pandas-stubs/core/tools/numeric.pyi +++ b/pandas-stubs/core/tools/numeric.pyi @@ -7,7 +7,7 @@ import numpy as np import pandas as pd from typing_extensions import TypeAlias -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, RaiseCoerce, @@ -22,19 +22,19 @@ def to_numeric( arg: Scalar, errors: Literal["raise", "coerce"] = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> float: ... @overload def to_numeric( arg: list | tuple | np.ndarray, errors: RaiseCoerce = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> npt.NDArray: ... @overload def to_numeric( arg: pd.Series, errors: RaiseCoerce = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> pd.Series: ... diff --git a/pandas-stubs/io/clipboards.pyi b/pandas-stubs/io/clipboards.pyi index e7ef8305d..9dcc7f139 100644 --- a/pandas-stubs/io/clipboards.pyi +++ b/pandas-stubs/io/clipboards.pyi @@ -12,7 +12,7 @@ from typing import ( from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, CSVEngine, @@ -30,7 +30,7 @@ from pandas.io.parsers import TextFileReader def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., @@ -89,7 +89,7 @@ def read_clipboard( def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., @@ -148,7 +148,7 @@ def read_clipboard( def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., diff --git a/pandas-stubs/io/excel/_base.pyi b/pandas-stubs/io/excel/_base.pyi index e00ee2eb5..834c902f1 100644 --- a/pandas-stubs/io/excel/_base.pyi +++ b/pandas-stubs/io/excel/_base.pyi @@ -19,7 +19,7 @@ import pyxlsb.workbook from typing_extensions import Self from xlrd.book import Book -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( Dtype, 
DtypeBackend, @@ -75,7 +75,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[IntStrT, DataFrame]: ... @overload @@ -118,7 +118,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[str, DataFrame]: ... @overload @@ -162,7 +162,7 @@ def read_excel( # type: ignore[overload-cannot-match] comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[int | str, DataFrame]: ... @overload @@ -205,7 +205,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> DataFrame: ... diff --git a/pandas-stubs/io/feather_format.pyi b/pandas-stubs/io/feather_format.pyi index a54d16b74..fd9aab3e4 100644 --- a/pandas-stubs/io/feather_format.pyi +++ b/pandas-stubs/io/feather_format.pyi @@ -1,6 +1,6 @@ from pandas import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -14,5 +14,5 @@ def read_feather( columns: list[HashableT] | None = ..., use_threads: bool = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... diff --git a/pandas-stubs/io/html.pyi b/pandas-stubs/io/html.pyi index 8677138cc..b4f28f7e8 100644 --- a/pandas-stubs/io/html.pyi +++ b/pandas-stubs/io/html.pyi @@ -12,7 +12,7 @@ from typing import ( from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -53,6 +53,6 @@ def read_html( keep_default_na: bool = ..., displayed_only: bool = ..., extract_links: Literal["header", "footer", "body", "all"] | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., storage_options: StorageOptions = ..., ) -> list[DataFrame]: ... diff --git a/pandas-stubs/io/json/_json.pyi b/pandas-stubs/io/json/_json.pyi index 8e023631c..da24d0c9c 100644 --- a/pandas-stubs/io/json/_json.pyi +++ b/pandas-stubs/io/json/_json.pyi @@ -10,7 +10,7 @@ from typing import ( from pandas.core.frame import DataFrame from pandas.core.series import Series -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, DtypeArg, @@ -47,7 +47,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> JsonReader[Series]: ... 
@overload @@ -72,7 +72,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> JsonReader[Series]: ... @overload @@ -97,7 +97,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> JsonReader[DataFrame]: ... @overload @@ -122,7 +122,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> JsonReader[DataFrame]: ... @overload @@ -147,7 +147,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> Series: ... @overload @@ -172,7 +172,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> Series: ... @overload @@ -197,7 +197,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> DataFrame: ... @overload @@ -222,7 +222,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> DataFrame: ... 
diff --git a/pandas-stubs/io/orc.pyi b/pandas-stubs/io/orc.pyi index 18e06fede..533ef46ed 100644 --- a/pandas-stubs/io/orc.pyi +++ b/pandas-stubs/io/orc.pyi @@ -2,7 +2,7 @@ from typing import Any from pandas import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -13,7 +13,7 @@ from pandas._typing import ( def read_orc( path: FilePath | ReadBuffer[bytes], columns: list[HashableT] | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., # TODO type with the correct pyarrow types # filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem filesystem: Any | None = ..., diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi index 3dbabb652..fb66ba45e 100644 --- a/pandas-stubs/io/parsers/readers.pyi +++ b/pandas-stubs/io/parsers/readers.pyi @@ -19,7 +19,7 @@ from typing import ( from pandas.core.frame import DataFrame from typing_extensions import Self -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, CSVEngine, @@ -98,7 +98,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload def read_csv( @@ -162,7 +162,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload def read_csv( @@ -226,7 +226,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... 
@overload def read_table( @@ -427,7 +427,7 @@ def read_fwf( colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ..., widths: Sequence[int] | None = ..., infer_nrows: int = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., date_format: dict[Hashable, str] | str | None = ..., iterator: Literal[True], chunksize: int | None = ..., @@ -440,7 +440,7 @@ def read_fwf( colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ..., widths: Sequence[int] | None = ..., infer_nrows: int = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., date_format: dict[Hashable, str] | str | None = ..., iterator: bool = ..., chunksize: int, @@ -453,7 +453,7 @@ def read_fwf( colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ..., widths: Sequence[int] | None = ..., infer_nrows: int = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., date_format: dict[Hashable, str] | str | None = ..., iterator: Literal[False] = ..., chunksize: None = ..., diff --git a/pandas-stubs/io/spss.pyi b/pandas-stubs/io/spss.pyi index d63089845..c5e886c08 100644 --- a/pandas-stubs/io/spss.pyi +++ b/pandas-stubs/io/spss.pyi @@ -1,6 +1,6 @@ from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -11,5 +11,5 @@ def read_spss( path: FilePath, usecols: list[HashableT] | None = ..., convert_categoricals: bool = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index 47c55e1aa..674abcb6f 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -17,7 +17,7 @@ from sqlalchemy.orm import FromStatement import sqlalchemy.sql.expression from typing_extensions import TypeAlias -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( DtypeArg, DtypeBackend, @@ -47,7 +47,7 @@ def read_sql_table( columns: list[str] | None = ..., *, chunksize: int, - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> Generator[DataFrame, None, None]: ... @overload def read_sql_table( @@ -59,7 +59,7 @@ def read_sql_table( parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ..., columns: list[str] | None = ..., chunksize: None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... @overload def read_sql_query( @@ -79,7 +79,7 @@ def read_sql_query( *, chunksize: int, dtype: DtypeArg | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> Generator[DataFrame, None, None]: ... @overload def read_sql_query( @@ -98,7 +98,7 @@ def read_sql_query( parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ..., chunksize: None = ..., dtype: DtypeArg | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... 
@overload def read_sql( @@ -119,7 +119,7 @@ def read_sql( *, chunksize: int, dtype: DtypeArg | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> Generator[DataFrame, None, None]: ... @overload def read_sql( @@ -139,7 +139,7 @@ def read_sql( columns: list[str] = ..., chunksize: None = ..., dtype: DtypeArg | None = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... class PandasSQL: diff --git a/pandas-stubs/io/xml.pyi b/pandas-stubs/io/xml.pyi index cfbefd18b..5a51174d7 100644 --- a/pandas-stubs/io/xml.pyi +++ b/pandas-stubs/io/xml.pyi @@ -2,7 +2,7 @@ from collections.abc import Sequence from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefault +from pandas._libs.lib import NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, ConvertersArg, @@ -33,5 +33,5 @@ def read_xml( iterparse: dict[str, list[str]] | None = ..., compression: CompressionOptions = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefault = ..., + dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., ) -> DataFrame: ... From 46b300c02e9e6541a105bd851645e0f1abf24f82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Wed, 25 Jun 2025 07:27:20 -0400 Subject: [PATCH 2/2] GH851 Change use of NoDefault to make it more private --- pandas-stubs/_libs/lib.pyi | 2 +- pandas-stubs/api/typing/__init__.pyi | 2 +- pandas-stubs/core/frame.pyi | 66 +++++++++++++-------------- pandas-stubs/core/generic.pyi | 4 +- pandas-stubs/core/groupby/groupby.pyi | 22 ++++----- pandas-stubs/core/groupby/grouper.pyi | 4 +- pandas-stubs/core/series.pyi | 22 ++++----- pandas-stubs/core/tools/numeric.pyi | 8 ++-- pandas-stubs/io/clipboards.pyi | 8 ++-- pandas-stubs/io/excel/_base.pyi | 10 ++-- pandas-stubs/io/feather_format.pyi | 4 +- pandas-stubs/io/html.pyi | 4 +- pandas-stubs/io/json/_json.pyi | 18 ++++---- pandas-stubs/io/orc.pyi | 4 +- pandas-stubs/io/parsers/readers.pyi | 14 +++--- pandas-stubs/io/spss.pyi | 4 +- pandas-stubs/io/sql.pyi | 14 +++--- pandas-stubs/io/xml.pyi | 4 +- 18 files changed, 107 insertions(+), 107 deletions(-) diff --git a/pandas-stubs/_libs/lib.pyi b/pandas-stubs/_libs/lib.pyi index e06eda996..022dea514 100644 --- a/pandas-stubs/_libs/lib.pyi +++ b/pandas-stubs/_libs/lib.pyi @@ -14,7 +14,7 @@ class _NoDefault(Enum): no_default = ... no_default: Final = _NoDefault.no_default -NoDefaultDoNotUse: TypeAlias = Literal[_NoDefault.no_default] +_NoDefaultDoNotUse: TypeAlias = Literal[_NoDefault.no_default] # noqa: PYI047 def infer_dtype(value: object, skipna: bool = ...) -> str: ... def is_iterator(obj: object) -> bool: ... 
diff --git a/pandas-stubs/api/typing/__init__.pyi b/pandas-stubs/api/typing/__init__.pyi index f2259d219..ec25b2671 100644 --- a/pandas-stubs/api/typing/__init__.pyi +++ b/pandas-stubs/api/typing/__init__.pyi @@ -21,7 +21,7 @@ from pandas.core.window import ( ) from pandas._libs import NaTType as NaTType -from pandas._libs.lib import NoDefaultDoNotUse as NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse as _NoDefaultDoNotUse from pandas._libs.missing import NAType as NAType from pandas.io.json._json import JsonReader as JsonReader diff --git a/pandas-stubs/core/frame.pyi b/pandas-stubs/core/frame.pyi index ae640b4e3..31517f2d0 100644 --- a/pandas-stubs/core/frame.pyi +++ b/pandas-stubs/core/frame.pyi @@ -72,7 +72,7 @@ from typing_extensions import ( ) import xarray as xr -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._libs.missing import NAType from pandas._libs.tslibs import BaseOffset from pandas._libs.tslibs.nattype import NaTType @@ -1107,192 +1107,192 @@ class DataFrame(NDFrame, OpsMixin, _GetItemHack): def groupby( # pyright: ignore reportOverlappingOverload self, by: Scalar, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Scalar, Literal[True]]: ... @overload def groupby( self, by: Scalar, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Scalar, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: DatetimeIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timestamp, Literal[True]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: DatetimeIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timestamp, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: TimedeltaIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timedelta, Literal[True]]: ... 
@overload def groupby( self, by: TimedeltaIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Timedelta, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: PeriodIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Period, Literal[True]]: ... @overload def groupby( self, by: PeriodIndex, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Period, Literal[False]]: ... @overload def groupby( # pyright: ignore reportOverlappingOverload self, by: IntervalIndex[IntervalT], - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[IntervalT, Literal[True]]: ... @overload def groupby( self, by: IntervalIndex[IntervalT], - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[IntervalT, Literal[False]]: ... @overload def groupby( # type: ignore[overload-overlap] # pyright: ignore reportOverlappingOverload self, by: MultiIndex | GroupByObjectNonScalar | None = ..., - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[tuple, Literal[True]]: ... @overload def groupby( # type: ignore[overload-overlap] self, by: MultiIndex | GroupByObjectNonScalar | None = ..., - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[tuple, Literal[False]]: ... 
@overload def groupby( # pyright: ignore reportOverlappingOverload self, by: Series[SeriesByT], - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[SeriesByT, Literal[True]]: ... @overload def groupby( self, by: Series[SeriesByT], - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[SeriesByT, Literal[False]]: ... @overload def groupby( self, by: CategoricalIndex | Index | Series, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[True] = True, sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Any, Literal[True]]: ... @overload def groupby( self, by: CategoricalIndex | Index | Series, - axis: AxisIndex | NoDefaultDoNotUse = ..., + axis: AxisIndex | _NoDefaultDoNotUse = ..., level: IndexLabel | None = ..., as_index: Literal[False] = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> DataFrameGroupBy[Any, Literal[False]]: ... def pivot( diff --git a/pandas-stubs/core/generic.pyi b/pandas-stubs/core/generic.pyi index e3257a20f..b0f81b17c 100644 --- a/pandas-stubs/core/generic.pyi +++ b/pandas-stubs/core/generic.pyi @@ -32,7 +32,7 @@ from typing_extensions import ( Self, ) -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( Axis, CompressionOptions, @@ -404,7 +404,7 @@ class NDFrame(indexing.IndexingMixin): def resample( self, rule: Frequency | dt.timedelta, - axis: Axis | NoDefaultDoNotUse = ..., + axis: Axis | _NoDefaultDoNotUse = ..., closed: Literal["right", "left"] | None = ..., label: Literal["right", "left"] | None = ..., convention: ToTimestampHow = ..., diff --git a/pandas-stubs/core/groupby/groupby.pyi b/pandas-stubs/core/groupby/groupby.pyi index c0cfec39e..4376d9b39 100644 --- a/pandas-stubs/core/groupby/groupby.pyi +++ b/pandas-stubs/core/groupby/groupby.pyi @@ -46,7 +46,7 @@ from typing_extensions import ( TypeAlias, ) -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._libs.tslibs import BaseOffset from pandas._typing import ( S1, @@ -283,27 +283,27 @@ class GroupBy(BaseGroupBy[NDFrameT]): ascending: bool = ..., na_option: str = ..., pct: bool = ..., - axis: AxisInt | NoDefaultDoNotUse = ..., + axis: AxisInt | _NoDefaultDoNotUse = ..., ) -> NDFrameT: ... @final def cumprod( - self, axis: Axis | NoDefaultDoNotUse = ..., *args, **kwargs + self, axis: Axis | _NoDefaultDoNotUse = ..., *args, **kwargs ) -> NDFrameT: ... @final def cumsum( - self, axis: Axis | NoDefaultDoNotUse = ..., *args, **kwargs + self, axis: Axis | _NoDefaultDoNotUse = ..., *args, **kwargs ) -> NDFrameT: ... 
@final def cummin( self, - axis: AxisInt | NoDefaultDoNotUse = ..., + axis: AxisInt | _NoDefaultDoNotUse = ..., numeric_only: bool = ..., **kwargs, ) -> NDFrameT: ... @final def cummax( self, - axis: AxisInt | NoDefaultDoNotUse = ..., + axis: AxisInt | _NoDefaultDoNotUse = ..., numeric_only: bool = ..., **kwargs, ) -> NDFrameT: ... @@ -312,22 +312,22 @@ class GroupBy(BaseGroupBy[NDFrameT]): self, periods: int | Sequence[int] = ..., freq: Frequency | None = ..., - axis: Axis | NoDefaultDoNotUse = ..., + axis: Axis | _NoDefaultDoNotUse = ..., fill_value=..., suffix: str | None = ..., ) -> NDFrameT: ... @final def diff( - self, periods: int = ..., axis: AxisInt | NoDefaultDoNotUse = ... + self, periods: int = ..., axis: AxisInt | _NoDefaultDoNotUse = ... ) -> NDFrameT: ... @final def pct_change( self, periods: int = ..., - fill_method: Literal["bfill", "ffill"] | None | NoDefaultDoNotUse = ..., - limit: int | None | NoDefaultDoNotUse = ..., + fill_method: Literal["bfill", "ffill"] | None | _NoDefaultDoNotUse = ..., + limit: int | None | _NoDefaultDoNotUse = ..., freq=..., - axis: Axis | NoDefaultDoNotUse = ..., + axis: Axis | _NoDefaultDoNotUse = ..., ) -> NDFrameT: ... @final def head(self, n: int = ...) -> NDFrameT: ... diff --git a/pandas-stubs/core/groupby/grouper.pyi b/pandas-stubs/core/groupby/grouper.pyi index d8a604da8..d58968857 100644 --- a/pandas-stubs/core/groupby/grouper.pyi +++ b/pandas-stubs/core/groupby/grouper.pyi @@ -16,7 +16,7 @@ from pandas import ( from pandas.core.resample import TimeGrouper from typing_extensions import Self -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( ArrayLike, Axis, @@ -42,7 +42,7 @@ class Grouper: cls, key: KeysArgType | None = ..., level: Level | ListLikeHashable[Level] | None = ..., - axis: Axis | NoDefaultDoNotUse = ..., + axis: Axis | _NoDefaultDoNotUse = ..., sort: bool = ..., dropna: bool = ..., ) -> Self: ... diff --git a/pandas-stubs/core/series.pyi b/pandas-stubs/core/series.pyi index 92b36d0d9..4f1852721 100644 --- a/pandas-stubs/core/series.pyi +++ b/pandas-stubs/core/series.pyi @@ -96,7 +96,7 @@ from pandas._libs.interval import ( Interval, _OrderableT, ) -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._libs.missing import NAType from pandas._libs.tslibs import BaseOffset from pandas._libs.tslibs.nattype import NaTType @@ -629,7 +629,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... @overload @@ -641,7 +641,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Timestamp]: ... @overload @@ -653,7 +653,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Timedelta]: ... 
@overload @@ -665,7 +665,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Period]: ... @overload @@ -677,7 +677,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, IntervalT]: ... @overload @@ -689,7 +689,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, tuple]: ... @overload @@ -701,7 +701,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... @overload @@ -714,7 +714,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Scalar]: ... @overload @@ -726,7 +726,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, SeriesByT]: ... @overload @@ -738,7 +738,7 @@ class Series(IndexOpsMixin[S1], NDFrame): as_index: _bool = ..., sort: _bool = ..., group_keys: _bool = ..., - observed: _bool | NoDefaultDoNotUse = ..., + observed: _bool | _NoDefaultDoNotUse = ..., dropna: _bool = ..., ) -> SeriesGroupBy[S1, Any]: ... # need the ignore because None is Hashable diff --git a/pandas-stubs/core/tools/numeric.pyi b/pandas-stubs/core/tools/numeric.pyi index 66e04d95f..e6eea93bc 100644 --- a/pandas-stubs/core/tools/numeric.pyi +++ b/pandas-stubs/core/tools/numeric.pyi @@ -7,7 +7,7 @@ import numpy as np import pandas as pd from typing_extensions import TypeAlias -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, RaiseCoerce, @@ -22,19 +22,19 @@ def to_numeric( arg: Scalar, errors: Literal["raise", "coerce"] = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> float: ... @overload def to_numeric( arg: list | tuple | np.ndarray, errors: RaiseCoerce = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> npt.NDArray: ... @overload def to_numeric( arg: pd.Series, errors: RaiseCoerce = ..., downcast: _Downcast = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> pd.Series: ... 
diff --git a/pandas-stubs/io/clipboards.pyi b/pandas-stubs/io/clipboards.pyi index 9dcc7f139..2aaba3806 100644 --- a/pandas-stubs/io/clipboards.pyi +++ b/pandas-stubs/io/clipboards.pyi @@ -12,7 +12,7 @@ from typing import ( from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, CSVEngine, @@ -30,7 +30,7 @@ from pandas.io.parsers import TextFileReader def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., @@ -89,7 +89,7 @@ def read_clipboard( def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., @@ -148,7 +148,7 @@ def read_clipboard( def read_clipboard( sep: str | None = ..., *, - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., delimiter: str | None = ..., header: int | Sequence[int] | Literal["infer"] | None = ..., names: ListLikeHashable | None = ..., diff --git a/pandas-stubs/io/excel/_base.pyi b/pandas-stubs/io/excel/_base.pyi index 834c902f1..e370871b4 100644 --- a/pandas-stubs/io/excel/_base.pyi +++ b/pandas-stubs/io/excel/_base.pyi @@ -19,7 +19,7 @@ import pyxlsb.workbook from typing_extensions import Self from xlrd.book import Book -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( Dtype, DtypeBackend, @@ -75,7 +75,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[IntStrT, DataFrame]: ... @overload @@ -118,7 +118,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[str, DataFrame]: ... @overload @@ -162,7 +162,7 @@ def read_excel( # type: ignore[overload-cannot-match] comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> dict[int | str, DataFrame]: ... @overload @@ -205,7 +205,7 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine_kwargs: dict[str, Any] | None = ..., ) -> DataFrame: ... 
diff --git a/pandas-stubs/io/feather_format.pyi b/pandas-stubs/io/feather_format.pyi index fd9aab3e4..47029882c 100644 --- a/pandas-stubs/io/feather_format.pyi +++ b/pandas-stubs/io/feather_format.pyi @@ -1,6 +1,6 @@ from pandas import DataFrame -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -14,5 +14,5 @@ def read_feather( columns: list[HashableT] | None = ..., use_threads: bool = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> DataFrame: ... diff --git a/pandas-stubs/io/html.pyi b/pandas-stubs/io/html.pyi index b4f28f7e8..091475b26 100644 --- a/pandas-stubs/io/html.pyi +++ b/pandas-stubs/io/html.pyi @@ -12,7 +12,7 @@ from typing import ( from pandas.core.frame import DataFrame -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -53,6 +53,6 @@ def read_html( keep_default_na: bool = ..., displayed_only: bool = ..., extract_links: Literal["header", "footer", "body", "all"] | None = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., storage_options: StorageOptions = ..., ) -> list[DataFrame]: ... diff --git a/pandas-stubs/io/json/_json.pyi b/pandas-stubs/io/json/_json.pyi index da24d0c9c..03659ec37 100644 --- a/pandas-stubs/io/json/_json.pyi +++ b/pandas-stubs/io/json/_json.pyi @@ -10,7 +10,7 @@ from typing import ( from pandas.core.frame import DataFrame from pandas.core.series import Series -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, DtypeArg, @@ -47,7 +47,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> JsonReader[Series]: ... @overload @@ -72,7 +72,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> JsonReader[Series]: ... @overload @@ -97,7 +97,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> JsonReader[DataFrame]: ... @overload @@ -122,7 +122,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> JsonReader[DataFrame]: ... @overload @@ -147,7 +147,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> Series: ... 
@overload @@ -172,7 +172,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> Series: ... @overload @@ -197,7 +197,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["ujson"] = ..., ) -> DataFrame: ... @overload @@ -222,7 +222,7 @@ def read_json( compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., engine: Literal["pyarrow"], ) -> DataFrame: ... diff --git a/pandas-stubs/io/orc.pyi b/pandas-stubs/io/orc.pyi index 533ef46ed..be90d86e4 100644 --- a/pandas-stubs/io/orc.pyi +++ b/pandas-stubs/io/orc.pyi @@ -2,7 +2,7 @@ from typing import Any from pandas import DataFrame -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( DtypeBackend, FilePath, @@ -13,7 +13,7 @@ from pandas._typing import ( def read_orc( path: FilePath | ReadBuffer[bytes], columns: list[HashableT] | None = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., # TODO type with the correct pyarrow types # filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem filesystem: Any | None = ..., diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi index fb66ba45e..8b52faaa8 100644 --- a/pandas-stubs/io/parsers/readers.pyi +++ b/pandas-stubs/io/parsers/readers.pyi @@ -19,7 +19,7 @@ from typing import ( from pandas.core.frame import DataFrame from typing_extensions import Self -from pandas._libs.lib import NoDefaultDoNotUse +from pandas._libs.lib import _NoDefaultDoNotUse from pandas._typing import ( CompressionOptions, CSVEngine, @@ -98,7 +98,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload def read_csv( @@ -162,7 +162,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload def read_csv( @@ -226,7 +226,7 @@ def read_csv( memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., - dtype_backend: DtypeBackend | NoDefaultDoNotUse = ..., + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> DataFrame: ... 
 @overload
 def read_table(
@@ -427,7 +427,7 @@ def read_fwf(
     colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
     widths: Sequence[int] | None = ...,
     infer_nrows: int = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
     date_format: dict[Hashable, str] | str | None = ...,
     iterator: Literal[True],
     chunksize: int | None = ...,
@@ -440,7 +440,7 @@ def read_fwf(
     colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
     widths: Sequence[int] | None = ...,
     infer_nrows: int = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
     date_format: dict[Hashable, str] | str | None = ...,
     iterator: bool = ...,
     chunksize: int,
@@ -453,7 +453,7 @@ def read_fwf(
     colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
     widths: Sequence[int] | None = ...,
     infer_nrows: int = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
     date_format: dict[Hashable, str] | str | None = ...,
     iterator: Literal[False] = ...,
     chunksize: None = ...,
diff --git a/pandas-stubs/io/spss.pyi b/pandas-stubs/io/spss.pyi
index c5e886c08..52d4a23ec 100644
--- a/pandas-stubs/io/spss.pyi
+++ b/pandas-stubs/io/spss.pyi
@@ -1,6 +1,6 @@
 from pandas.core.frame import DataFrame
 
-from pandas._libs.lib import NoDefaultDoNotUse
+from pandas._libs.lib import _NoDefaultDoNotUse
 from pandas._typing import (
     DtypeBackend,
     FilePath,
@@ -11,5 +11,5 @@ def read_spss(
     path: FilePath,
     usecols: list[HashableT] | None = ...,
     convert_categoricals: bool = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> DataFrame: ...
diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi
index 674abcb6f..4036c6e77 100644
--- a/pandas-stubs/io/sql.pyi
+++ b/pandas-stubs/io/sql.pyi
@@ -17,7 +17,7 @@ from sqlalchemy.orm import FromStatement
 import sqlalchemy.sql.expression
 from typing_extensions import TypeAlias
 
-from pandas._libs.lib import NoDefaultDoNotUse
+from pandas._libs.lib import _NoDefaultDoNotUse
 from pandas._typing import (
     DtypeArg,
     DtypeBackend,
@@ -47,7 +47,7 @@ def read_sql_table(
     columns: list[str] | None = ...,
     *,
     chunksize: int,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> Generator[DataFrame, None, None]: ...
 @overload
 def read_sql_table(
@@ -59,7 +59,7 @@ def read_sql_table(
     parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
     columns: list[str] | None = ...,
     chunksize: None = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> DataFrame: ...
 @overload
 def read_sql_query(
@@ -79,7 +79,7 @@ def read_sql_query(
     *,
     chunksize: int,
     dtype: DtypeArg | None = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> Generator[DataFrame, None, None]: ...
 @overload
 def read_sql_query(
@@ -98,7 +98,7 @@ def read_sql_query(
     parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
     chunksize: None = ...,
     dtype: DtypeArg | None = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> DataFrame: ...
 @overload
 def read_sql(
@@ -119,7 +119,7 @@ def read_sql(
     *,
     chunksize: int,
     dtype: DtypeArg | None = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> Generator[DataFrame, None, None]: ...
 @overload
 def read_sql(
@@ -139,7 +139,7 @@ def read_sql(
     columns: list[str] = ...,
     chunksize: None = ...,
     dtype: DtypeArg | None = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> DataFrame: ...
 
 class PandasSQL:
diff --git a/pandas-stubs/io/xml.pyi b/pandas-stubs/io/xml.pyi
index 5a51174d7..d76408e75 100644
--- a/pandas-stubs/io/xml.pyi
+++ b/pandas-stubs/io/xml.pyi
@@ -2,7 +2,7 @@ from collections.abc import Sequence
 
 from pandas.core.frame import DataFrame
 
-from pandas._libs.lib import NoDefaultDoNotUse
+from pandas._libs.lib import _NoDefaultDoNotUse
 from pandas._typing import (
     CompressionOptions,
     ConvertersArg,
@@ -33,5 +33,5 @@ def read_xml(
     iterparse: dict[str, list[str]] | None = ...,
     compression: CompressionOptions = ...,
     storage_options: StorageOptions = ...,
-    dtype_backend: DtypeBackend | NoDefaultDoNotUse = ...,
+    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
 ) -> DataFrame: ...
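
For reference, a minimal usage sketch (not part of the diff; the file name below is hypothetical): with these stubs, callers either omit dtype_backend or pass one of the documented literals, and the private _NoDefaultDoNotUse sentinel never has to be spelled out in user code.

import pandas as pd

# Omitting dtype_backend keeps the internal no-default sentinel as the default.
df = pd.read_feather("data.feather")

# Passing an explicit backend literal also type-checks and returns a DataFrame.
df_pa = pd.read_feather("data.feather", dtype_backend="pyarrow")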