From 8c21376aa94deb91cb10e78f56ecd68aa7fe90fa Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Wed, 20 Sep 2023 02:38:32 -0700 Subject: [PATCH 01/17] Start a list of modules which require typing (#8198) * Start a list of modules which require typing Notes inline. Just one module so far! --- pyproject.toml | 11 ++++++++++- xarray/core/rolling_exp.py | 20 +++++++++++++++----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index dd380937bd2..663920f8dbb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,7 +79,7 @@ show_error_context = true warn_redundant_casts = true warn_unused_ignores = true -# Most of the numerical computing stack doesn't have type annotations yet. +# Much of the numerical computing stack doesn't have type annotations yet. [[tool.mypy.overrides]] ignore_missing_imports = true module = [ @@ -118,6 +118,15 @@ module = [ "numpy.exceptions.*", # remove once support for `numpy<2.0` has been dropped ] +# Gradually we want to add more modules to this list, ratcheting up our total +# coverage. Once a module is here, functions require annotations in order to +# pass mypy. It would be especially useful to have tests here, because without +# annotating test functions, we don't have a great way of testing our type +# annotations — even just `-> None` is sufficient for mypy to check them. +[[tool.mypy.overrides]] +disallow_untyped_defs = true +module = ["xarray.core.rolling_exp"] + [tool.ruff] builtins = ["ellipsis"] exclude = [ diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index bd30c634aae..c56bf6a384e 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -9,10 +9,15 @@ from xarray.core.options import _get_keep_attrs from xarray.core.pdcompat import count_not_none from xarray.core.pycompat import is_duck_dask_array -from xarray.core.types import T_DataWithCoords +from xarray.core.types import T_DataWithCoords, T_DuckArray -def _get_alpha(com=None, span=None, halflife=None, alpha=None): +def _get_alpha( + com: float | None = None, + span: float | None = None, + halflife: float | None = None, + alpha: float | None = None, +) -> float: # pandas defines in terms of com (converting to alpha in the algo) # so use its function to get a com and then convert to alpha @@ -20,7 +25,7 @@ def _get_alpha(com=None, span=None, halflife=None, alpha=None): return 1 / (1 + com) -def move_exp_nanmean(array, *, axis, alpha): +def move_exp_nanmean(array: T_DuckArray, *, axis: int, alpha: float) -> np.ndarray: if is_duck_dask_array(array): raise TypeError("rolling_exp is not currently supported for dask-like arrays") import numbagg @@ -32,7 +37,7 @@ def move_exp_nanmean(array, *, axis, alpha): return numbagg.move_exp_nanmean(array, axis=axis, alpha=alpha) -def move_exp_nansum(array, *, axis, alpha): +def move_exp_nansum(array: T_DuckArray, *, axis: int, alpha: float) -> np.ndarray: if is_duck_dask_array(array): raise TypeError("rolling_exp is not currently supported for dask-like arrays") import numbagg @@ -40,7 +45,12 @@ def move_exp_nansum(array, *, axis, alpha): return numbagg.move_exp_nansum(array, axis=axis, alpha=alpha) -def _get_center_of_mass(comass, span, halflife, alpha): +def _get_center_of_mass( + comass: float | None, + span: float | None, + halflife: float | None, + alpha: float | None, +) -> float: """ Vendored from pandas.core.window.common._get_center_of_mass From 04550e64089e58646be23d83695f32a1669db8eb Mon Sep 17 00:00:00 2001 From: 
Riulinchen <119889091+Riulinchen@users.noreply.github.com> Date: Wed, 20 Sep 2023 21:25:57 +0200 Subject: [PATCH 02/17] Make documentation of DataArray.where clearer (#7955) * Make doc of DataArray.where clearer * Update xarray/core/common.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Loren Co-authored-by: Deepak Cherian Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- xarray/core/common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/xarray/core/common.py b/xarray/core/common.py index ade701457c6..224b4154ef8 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -1066,6 +1066,9 @@ def where( ) -> T_DataWithCoords: """Filter elements from this object according to a condition. + Returns elements from 'DataArray', where 'cond' is True, + otherwise fills in 'other'. + This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic. From 2b784f24548a28a88a373c98722e562c7ddc7e01 Mon Sep 17 00:00:00 2001 From: Amrest Chinkamol Date: Thu, 21 Sep 2023 02:31:49 +0700 Subject: [PATCH 03/17] Consistent `DatasetRolling.construct` behavior (#7578) * Removed `.isel` for consistent rolling behavior. `.isel` causes `DatasetRolling.construct` behavior to be inconsistent with `DataArrayRolling.construct` when `stride` > 1. * new rolling construct strategy for coords * add whats-new * add new tests with different coords * next try on aligning strided coords * add peakmem test for rolling.construct * increase asv benchmark rolling sizes --------- Co-authored-by: Michael Niklas --- asv_bench/benchmarks/rolling.py | 13 ++++++-- doc/whats-new.rst | 5 ++- xarray/core/rolling.py | 9 ++++-- xarray/tests/test_rolling.py | 55 ++++++++++++++++++++++++++++----- 4 files changed, 68 insertions(+), 14 deletions(-) diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 1d3713f19bf..579f4f00fbc 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -5,10 +5,10 @@ from . import parameterized, randn, requires_dask -nx = 300 +nx = 3000 long_nx = 30000 ny = 200 -nt = 100 +nt = 1000 window = 20 randn_xy = randn((nx, ny), frac_nan=0.1) @@ -115,6 +115,11 @@ def peakmem_1drolling_reduce(self, func, use_bottleneck): roll = self.ds.var3.rolling(t=100) getattr(roll, func)() + @parameterized(["stride"], ([None, 5, 50])) + def peakmem_1drolling_construct(self, stride): + self.ds.var2.rolling(t=100).construct("w", stride=stride) + self.ds.var3.rolling(t=100).construct("w", stride=stride) + class DatasetRollingMemory(RollingMemory): @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) @@ -128,3 +133,7 @@ def peakmem_1drolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.rolling(t=100) getattr(roll, func)() + + @parameterized(["stride"], ([None, 5, 50])) + def peakmem_1drolling_construct(self, stride): + self.ds.rolling(t=100).construct("w", stride=stride) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index ee74411a004..67429ed7e18 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -74,9 +74,12 @@ Bug fixes of :py:meth:`DataArray.__setitem__` lose dimension names. (:issue:`7030`, :pull:`8067`) By `Darsh Ranjan `_. 
- Return ``float64`` in presence of ``NaT`` in :py:class:`~core.accessor_dt.DatetimeAccessor` and - special case ``NaT`` handling in :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar()` + special case ``NaT`` handling in :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar` (:issue:`7928`, :pull:`8084`). By `Kai Mühlbauer `_. +- Fix :py:meth:`~core.rolling.DatasetRolling.construct` with stride on Datasets without indexes. + (:issue:`7021`, :pull:`7578`). + By `Amrest Chinkamol `_ and `Michael Niklas `_. - Calling plot with kwargs ``col``, ``row`` or ``hue`` no longer squeezes dimensions passed via these arguments (:issue:`7552`, :pull:`8174`). By `Wiktor Kraśnicki `_. diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index d49cb6e13a4..c6911cbe65b 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -785,11 +785,14 @@ def construct( if not keep_attrs: dataset[key].attrs = {} + # Need to stride coords as well. TODO: is there a better way? + coords = self.obj.isel( + {d: slice(None, None, s) for d, s in zip(self.dim, strides)} + ).coords + attrs = self.obj.attrs if keep_attrs else {} - return Dataset(dataset, coords=self.obj.coords, attrs=attrs).isel( - {d: slice(None, None, s) for d, s in zip(self.dim, strides)} - ) + return Dataset(dataset, coords=coords, attrs=attrs) class Coarsen(CoarsenArithmetic, Generic[T_Xarray]): diff --git a/xarray/tests/test_rolling.py b/xarray/tests/test_rolling.py index 9a15696b004..72d1b9071dd 100644 --- a/xarray/tests/test_rolling.py +++ b/xarray/tests/test_rolling.py @@ -175,7 +175,7 @@ def test_rolling_pandas_compat(self, center, window, min_periods) -> None: @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) - def test_rolling_construct(self, center, window) -> None: + def test_rolling_construct(self, center: bool, window: int) -> None: s = pd.Series(np.arange(10)) da = DataArray.from_series(s) @@ -610,7 +610,7 @@ def test_rolling_pandas_compat(self, center, window, min_periods) -> None: @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) - def test_rolling_construct(self, center, window) -> None: + def test_rolling_construct(self, center: bool, window: int) -> None: df = pd.DataFrame( { "x": np.random.randn(20), @@ -627,12 +627,6 @@ def test_rolling_construct(self, center, window) -> None: np.testing.assert_allclose(df_rolling["x"].values, ds_rolling_mean["x"].values) np.testing.assert_allclose(df_rolling.index, ds_rolling_mean["index"]) - # with stride - ds_rolling_mean = ds_rolling.construct("window", stride=2).mean("window") - np.testing.assert_allclose( - df_rolling["x"][::2].values, ds_rolling_mean["x"].values - ) - np.testing.assert_allclose(df_rolling.index[::2], ds_rolling_mean["index"]) # with fill_value ds_rolling_mean = ds_rolling.construct("window", stride=2, fill_value=0.0).mean( "window" @@ -640,6 +634,51 @@ def test_rolling_construct(self, center, window) -> None: assert (ds_rolling_mean.isnull().sum() == 0).to_array(dim="vars").all() assert (ds_rolling_mean["x"] == 0.0).sum() >= 0 + @pytest.mark.parametrize("center", (True, False)) + @pytest.mark.parametrize("window", (1, 2, 3, 4)) + def test_rolling_construct_stride(self, center: bool, window: int) -> None: + df = pd.DataFrame( + { + "x": np.random.randn(20), + "y": np.random.randn(20), + "time": np.linspace(0, 1, 20), + } + ) + ds = Dataset.from_dataframe(df) + df_rolling_mean = df.rolling(window, center=center, min_periods=1).mean() + + # With an index 
(dimension coordinate) + ds_rolling = ds.rolling(index=window, center=center) + ds_rolling_mean = ds_rolling.construct("w", stride=2).mean("w") + np.testing.assert_allclose( + df_rolling_mean["x"][::2].values, ds_rolling_mean["x"].values + ) + np.testing.assert_allclose(df_rolling_mean.index[::2], ds_rolling_mean["index"]) + + # Without index (https://github.com/pydata/xarray/issues/7021) + ds2 = ds.drop_vars("index") + ds2_rolling = ds2.rolling(index=window, center=center) + ds2_rolling_mean = ds2_rolling.construct("w", stride=2).mean("w") + np.testing.assert_allclose( + df_rolling_mean["x"][::2].values, ds2_rolling_mean["x"].values + ) + + # Mixed coordinates, indexes and 2D coordinates + ds3 = xr.Dataset( + {"x": ("t", range(20)), "x2": ("y", range(5))}, + { + "t": range(20), + "y": ("y", range(5)), + "t2": ("t", range(20)), + "y2": ("y", range(5)), + "yt": (["t", "y"], np.ones((20, 5))), + }, + ) + ds3_rolling = ds3.rolling(t=window, center=center) + ds3_rolling_mean = ds3_rolling.construct("w", stride=2).mean("w") + for coord in ds3.coords: + assert coord in ds3_rolling_mean.coords + @pytest.mark.slow @pytest.mark.parametrize("ds", (1, 2), indirect=True) @pytest.mark.parametrize("center", (True, False)) From f60c1a3e969bfa580dfecca2e9ba7fee71447d9b Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Wed, 20 Sep 2023 12:57:16 -0700 Subject: [PATCH 04/17] Skip flaky test (#8219) * Skip flaky test --- xarray/tests/test_distributed.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py index 6a8cd9c457b..bfc37121597 100644 --- a/xarray/tests/test_distributed.py +++ b/xarray/tests/test_distributed.py @@ -168,6 +168,10 @@ def test_open_mfdataset_multiple_files_parallel_distributed(parallel, tmp_path): @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_multiple_files_parallel(parallel, tmp_path): + if parallel: + pytest.skip( + "Flaky in CI. Would be a welcome contribution to make a similar test reliable." + ) lon = np.arange(100) time = xr.cftime_range("20010101", periods=100, calendar="360_day") data = np.random.random((time.size, lon.size)) From 96cf77a5ceaf849f8b867b4edc873bcb651a0b04 Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Wed, 20 Sep 2023 15:57:53 -0700 Subject: [PATCH 05/17] Convert `indexes.py` to use `Self` for typing (#8217) * Convert `Variable` to use `Self` for typing I wanted to do this separately, as it's the only one that adds some casts. And given the ratio of impact-to-potential-merge-conflicts, I didn't want to slow the other PR down, even if it seems to be OK. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xarray/core/indexes.py Co-authored-by: Michael Niklas * Update xarray/core/indexes.py Co-authored-by: Michael Niklas * Update xarray/core/indexes.py Co-authored-by: Michael Niklas * Update xarray/core/indexes.py Co-authored-by: Michael Niklas * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Michael Niklas --- xarray/core/indexes.py | 63 +++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py index 9972896d6df..1697762f7ae 100644 --- a/xarray/core/indexes.py +++ b/xarray/core/indexes.py @@ -24,7 +24,7 @@ ) if TYPE_CHECKING: - from xarray.core.types import ErrorOptions, JoinOptions, T_Index + from xarray.core.types import ErrorOptions, JoinOptions, Self from xarray.core.variable import Variable @@ -60,11 +60,11 @@ class Index: @classmethod def from_variables( - cls: type[T_Index], + cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], - ) -> T_Index: + ) -> Self: """Create a new index object from one or more coordinate variables. This factory method must be implemented in all subclasses of Index. @@ -88,11 +88,11 @@ def from_variables( @classmethod def concat( - cls: type[T_Index], - indexes: Sequence[T_Index], + cls, + indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, - ) -> T_Index: + ) -> Self: """Create a new index by concatenating one or more indexes of the same type. @@ -120,9 +120,7 @@ def concat( raise NotImplementedError() @classmethod - def stack( - cls: type[T_Index], variables: Mapping[Any, Variable], dim: Hashable - ) -> T_Index: + def stack(cls, variables: Mapping[Any, Variable], dim: Hashable) -> Self: """Create a new index by stacking coordinate variables into a single new dimension. @@ -208,8 +206,8 @@ def to_pandas_index(self) -> pd.Index: raise TypeError(f"{self!r} cannot be cast to a pandas.Index object") def isel( - self: T_Index, indexers: Mapping[Any, int | slice | np.ndarray | Variable] - ) -> T_Index | None: + self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] + ) -> Self | None: """Maybe returns a new index from the current index itself indexed by positional indexers. @@ -264,7 +262,7 @@ def sel(self, labels: dict[Any, Any]) -> IndexSelResult: """ raise NotImplementedError(f"{self!r} doesn't support label-based selection") - def join(self: T_Index, other: T_Index, how: JoinOptions = "inner") -> T_Index: + def join(self, other: Self, how: JoinOptions = "inner") -> Self: """Return a new index from the combination of this index with another index of the same type. @@ -286,7 +284,7 @@ def join(self: T_Index, other: T_Index, how: JoinOptions = "inner") -> T_Index: f"{self!r} doesn't support alignment with inner/outer join method" ) - def reindex_like(self: T_Index, other: T_Index) -> dict[Hashable, Any]: + def reindex_like(self, other: Self) -> dict[Hashable, Any]: """Query the index with another index of the same type. Implementation is optional but required in order to support alignment. 
@@ -304,7 +302,7 @@ def reindex_like(self: T_Index, other: T_Index) -> dict[Hashable, Any]: """ raise NotImplementedError(f"{self!r} doesn't support re-indexing labels") - def equals(self: T_Index, other: T_Index) -> bool: + def equals(self, other: Self) -> bool: """Compare this index with another index of the same type. Implementation is optional but required in order to support alignment. @@ -321,7 +319,7 @@ def equals(self: T_Index, other: T_Index) -> bool: """ raise NotImplementedError() - def roll(self: T_Index, shifts: Mapping[Any, int]) -> T_Index | None: + def roll(self, shifts: Mapping[Any, int]) -> Self | None: """Roll this index by an offset along one or more dimensions. This method can be re-implemented in subclasses of Index, e.g., when the @@ -347,10 +345,10 @@ def roll(self: T_Index, shifts: Mapping[Any, int]) -> T_Index | None: return None def rename( - self: T_Index, + self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], - ) -> T_Index: + ) -> Self: """Maybe update the index with new coordinate and dimension names. This method should be re-implemented in subclasses of Index if it has @@ -377,7 +375,7 @@ def rename( """ return self - def copy(self: T_Index, deep: bool = True) -> T_Index: + def copy(self, deep: bool = True) -> Self: """Return a (deep) copy of this index. Implementation in subclasses of Index is optional. The base class @@ -396,15 +394,13 @@ def copy(self: T_Index, deep: bool = True) -> T_Index: """ return self._copy(deep=deep) - def __copy__(self: T_Index) -> T_Index: + def __copy__(self) -> Self: return self.copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Index: return self._copy(deep=True, memo=memo) - def _copy( - self: T_Index, deep: bool = True, memo: dict[int, Any] | None = None - ) -> T_Index: + def _copy(self, deep: bool = True, memo: dict[int, Any] | None = None) -> Self: cls = self.__class__ copied = cls.__new__(cls) if deep: @@ -414,7 +410,7 @@ def _copy( copied.__dict__.update(self.__dict__) return copied - def __getitem__(self: T_Index, indexer: Any) -> T_Index: + def __getitem__(self, indexer: Any) -> Self: raise NotImplementedError() def _repr_inline_(self, max_width): @@ -674,10 +670,10 @@ def _concat_indexes(indexes, dim, positions=None) -> pd.Index: @classmethod def concat( cls, - indexes: Sequence[PandasIndex], + indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, - ) -> PandasIndex: + ) -> Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: @@ -800,7 +796,11 @@ def equals(self, other: Index): return False return self.index.equals(other.index) and self.dim == other.dim - def join(self: PandasIndex, other: PandasIndex, how: str = "inner") -> PandasIndex: + def join( + self, + other: Self, + how: str = "inner", + ) -> Self: if how == "outer": index = self.index.union(other.index) else: @@ -811,7 +811,7 @@ def join(self: PandasIndex, other: PandasIndex, how: str = "inner") -> PandasInd return type(self)(index, self.dim, coord_dtype=coord_dtype) def reindex_like( - self, other: PandasIndex, method=None, tolerance=None + self, other: Self, method=None, tolerance=None ) -> dict[Hashable, Any]: if not self.index.is_unique: raise ValueError( @@ -963,12 +963,12 @@ def from_variables( return obj @classmethod - def concat( # type: ignore[override] + def concat( cls, - indexes: Sequence[PandasMultiIndex], + indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, - ) -> PandasMultiIndex: + ) -> 
Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: @@ -1602,7 +1602,7 @@ def to_pandas_indexes(self) -> Indexes[pd.Index]: return Indexes(indexes, self._variables, index_type=pd.Index) def copy_indexes( - self, deep: bool = True, memo: dict[int, Any] | None = None + self, deep: bool = True, memo: dict[int, T_PandasOrXarrayIndex] | None = None ) -> tuple[dict[Hashable, T_PandasOrXarrayIndex], dict[Hashable, Variable]]: """Return a new dictionary with copies of indexes, preserving unique indexes. @@ -1619,6 +1619,7 @@ def copy_indexes( new_indexes = {} new_index_vars = {} + idx: T_PandasOrXarrayIndex for idx, coords in self.group_by_index(): if isinstance(idx, pd.Index): convert_new_idx = True From 3ace2fb4612d4bc1cbce6fa22fe3954a0e06599e Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Wed, 20 Sep 2023 18:53:40 -0700 Subject: [PATCH 06/17] Use `Self` rather than concrete types, remove `cast`s (#8216) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use `Self` rather than concrete types, remove `cast`s This should also allow for subtyping * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Undo one `Self` * Unused ignore * Add check for redundant self annotations * And `DataWithCoords` * And `DataArray` & `Dataset` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * And `Variable` * Update xarray/core/dataarray.py Co-authored-by: Michael Niklas * Update xarray/core/dataarray.py Co-authored-by: Michael Niklas * Update xarray/core/dataarray.py Co-authored-by: Michael Niklas * Clean-ups — `other`, casts, obsolete comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * another one --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Michael Niklas --- pyproject.toml | 1 + xarray/core/accessor_str.py | 2 +- xarray/core/common.py | 37 ++-- xarray/core/concat.py | 8 +- xarray/core/coordinates.py | 28 +-- xarray/core/dataarray.py | 331 ++++++++++++++---------------- xarray/core/dataset.py | 364 ++++++++++++++++----------------- xarray/core/variable.py | 60 +++--- xarray/tests/test_dataarray.py | 2 +- 9 files changed, 400 insertions(+), 433 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 663920f8dbb..cb51c6ea741 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,6 +72,7 @@ source = ["xarray"] exclude_lines = ["pragma: no cover", "if TYPE_CHECKING"] [tool.mypy] +enable_error_code = "redundant-self" exclude = 'xarray/util/generate_.*\.py' files = "xarray" show_error_codes = true diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index aa6dc2c7114..573200b5c88 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -2386,7 +2386,7 @@ def _partitioner( # _apply breaks on an empty array in this case if not self._obj.size: - return self._obj.copy().expand_dims({dim: 0}, axis=-1) # type: ignore[return-value] + return self._obj.copy().expand_dims({dim: 0}, axis=-1) arrfunc = lambda x, isep: np.array(func(x, isep), dtype=self._obj.dtype) diff --git a/xarray/core/common.py b/xarray/core/common.py index 224b4154ef8..e4e3e60e815 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -45,6 +45,7 @@ DatetimeLike, DTypeLikeSave, ScalarOrArray, + Self, SideOptions, T_Chunks, T_DataWithCoords, @@ 
-381,11 +382,11 @@ class DataWithCoords(AttrAccessMixin): __slots__ = ("_close",) def squeeze( - self: T_DataWithCoords, + self, dim: Hashable | Iterable[Hashable] | None = None, drop: bool = False, axis: int | Iterable[int] | None = None, - ) -> T_DataWithCoords: + ) -> Self: """Return a new object with squeezed data. Parameters @@ -414,12 +415,12 @@ def squeeze( return self.isel(drop=drop, **{d: 0 for d in dims}) def clip( - self: T_DataWithCoords, + self, min: ScalarOrArray | None = None, max: ScalarOrArray | None = None, *, keep_attrs: bool | None = None, - ) -> T_DataWithCoords: + ) -> Self: """ Return an array whose values are limited to ``[min, max]``. At least one of max or min must be given. @@ -472,10 +473,10 @@ def _calc_assign_results( return {k: v(self) if callable(v) else v for k, v in kwargs.items()} def assign_coords( - self: T_DataWithCoords, + self, coords: Mapping[Any, Any] | None = None, **coords_kwargs: Any, - ) -> T_DataWithCoords: + ) -> Self: """Assign new coordinates to this object. Returns a new object with all the original data in addition to the new @@ -620,9 +621,7 @@ def assign_coords( data.coords.update(results) return data - def assign_attrs( - self: T_DataWithCoords, *args: Any, **kwargs: Any - ) -> T_DataWithCoords: + def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: """Assign new attrs to this object. Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``. @@ -1061,9 +1060,7 @@ def _resample( restore_coord_dims=restore_coord_dims, ) - def where( - self: T_DataWithCoords, cond: Any, other: Any = dtypes.NA, drop: bool = False - ) -> T_DataWithCoords: + def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: """Filter elements from this object according to a condition. Returns elements from 'DataArray', where 'cond' is True, @@ -1208,9 +1205,7 @@ def close(self) -> None: self._close() self._close = None - def isnull( - self: T_DataWithCoords, keep_attrs: bool | None = None - ) -> T_DataWithCoords: + def isnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is a missing value. Parameters @@ -1253,9 +1248,7 @@ def isnull( keep_attrs=keep_attrs, ) - def notnull( - self: T_DataWithCoords, keep_attrs: bool | None = None - ) -> T_DataWithCoords: + def notnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is not a missing value. Parameters @@ -1298,7 +1291,7 @@ def notnull( keep_attrs=keep_attrs, ) - def isin(self: T_DataWithCoords, test_elements: Any) -> T_DataWithCoords: + def isin(self, test_elements: Any) -> Self: """Tests each value in the array for whether it is in test elements. Parameters @@ -1347,7 +1340,7 @@ def isin(self: T_DataWithCoords, test_elements: Any) -> T_DataWithCoords: ) def astype( - self: T_DataWithCoords, + self, dtype, *, order=None, @@ -1355,7 +1348,7 @@ def astype( subok=None, copy=None, keep_attrs=True, - ) -> T_DataWithCoords: + ) -> Self: """ Copy of the xarray object, with data cast to a specified type. Leaves coordinate dtype unchanged. 
@@ -1422,7 +1415,7 @@ def astype( dask="allowed", ) - def __enter__(self: T_DataWithCoords) -> T_DataWithCoords: + def __enter__(self) -> Self: return self def __exit__(self, exc_type, exc_value, traceback) -> None: diff --git a/xarray/core/concat.py b/xarray/core/concat.py index a76bb6b0033..a136480b2fb 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -1,7 +1,7 @@ from __future__ import annotations from collections.abc import Hashable, Iterable -from typing import TYPE_CHECKING, Any, Union, cast, overload +from typing import TYPE_CHECKING, Any, Union, overload import numpy as np import pandas as pd @@ -504,8 +504,7 @@ def _dataset_concat( # case where concat dimension is a coordinate or data_var but not a dimension if (dim in coord_names or dim in data_names) and dim not in dim_names: - # TODO: Overriding type because .expand_dims has incorrect typing: - datasets = [cast(T_Dataset, ds.expand_dims(dim)) for ds in datasets] + datasets = [ds.expand_dims(dim) for ds in datasets] # determine which variables to concatenate concat_over, equals, concat_dim_lengths = _calc_concat_over( @@ -708,8 +707,7 @@ def _dataarray_concat( if compat == "identical": raise ValueError("array names not identical") else: - # TODO: Overriding type because .rename has incorrect typing: - arr = cast(T_DataArray, arr.rename(name)) + arr = arr.rename(name) datasets.append(arr._to_temp_dataset()) ds = _dataset_concat( diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index e20c022e637..97ba383ebde 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -23,7 +23,7 @@ create_default_index_implicit, ) from xarray.core.merge import merge_coordinates_without_align, merge_coords -from xarray.core.types import Self, T_DataArray +from xarray.core.types import Self, T_DataArray, T_Xarray from xarray.core.utils import ( Frozen, ReprObject, @@ -425,7 +425,7 @@ def __delitem__(self, key: Hashable) -> None: # redirect to DatasetCoordinates.__delitem__ del self._data.coords[key] - def equals(self, other: Coordinates) -> bool: + def equals(self, other: Self) -> bool: """Two Coordinates objects are equal if they have matching variables, all of which are equal. @@ -437,7 +437,7 @@ def equals(self, other: Coordinates) -> bool: return False return self.to_dataset().equals(other.to_dataset()) - def identical(self, other: Coordinates) -> bool: + def identical(self, other: Self) -> bool: """Like equals, but also checks all variable attributes. See Also @@ -565,9 +565,7 @@ def update(self, other: Mapping[Any, Any]) -> None: self._update_coords(coords, indexes) - def assign( - self, coords: Mapping | None = None, **coords_kwargs: Any - ) -> Coordinates: + def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self: """Assign new coordinates (and indexes) to a Coordinates object, returning a new object with all the original coordinates in addition to the new ones. @@ -656,7 +654,7 @@ def copy( self, deep: bool = False, memo: dict[int, Any] | None = None, - ) -> Coordinates: + ) -> Self: """Return a copy of this Coordinates object.""" # do not copy indexes (may corrupt multi-coordinate indexes) # TODO: disable variables deepcopy? 
it may also be problematic when they @@ -664,8 +662,16 @@ def copy( variables = { k: v._copy(deep=deep, memo=memo) for k, v in self.variables.items() } - return Coordinates._construct_direct( - coords=variables, indexes=dict(self.xindexes), dims=dict(self.sizes) + + # TODO: getting an error with `self._construct_direct`, possibly because of how + # a subclass implements `_construct_direct`. (This was originally the same + # runtime code, but we switched the type definitions in #8216, which + # necessitates the cast.) + return cast( + Self, + Coordinates._construct_direct( + coords=variables, indexes=dict(self.xindexes), dims=dict(self.sizes) + ), ) @@ -915,9 +921,7 @@ def drop_indexed_coords( return Coordinates._construct_direct(coords=new_variables, indexes=new_indexes) -def assert_coordinate_consistent( - obj: T_DataArray | Dataset, coords: Mapping[Any, Variable] -) -> None: +def assert_coordinate_consistent(obj: T_Xarray, coords: Mapping[Any, Variable]) -> None: """Make sure the dimension coordinate of obj is consistent with coords. obj: DataArray or Dataset diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 791aad5cd17..73464c07c82 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -4,7 +4,15 @@ import warnings from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from os import PathLike -from typing import TYPE_CHECKING, Any, Callable, Literal, NoReturn, cast, overload +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Literal, + NoReturn, + overload, +) import numpy as np import pandas as pd @@ -41,6 +49,7 @@ from xarray.core.indexing import is_fancy_indexer, map_index_queries from xarray.core.merge import PANDAS_TYPES, MergeError from xarray.core.options import OPTIONS, _get_keep_attrs +from xarray.core.types import DaCompatible, T_DataArray, T_DataArrayOrSet from xarray.core.utils import ( Default, HybridMappingProxy, @@ -100,8 +109,8 @@ QueryEngineOptions, QueryParserOptions, ReindexMethodOptions, + Self, SideOptions, - T_DataArray, T_Xarray, ) from xarray.core.weighted import DataArrayWeighted @@ -213,13 +222,13 @@ def _check_data_shape(data, coords, dims): return data -class _LocIndexer: +class _LocIndexer(Generic[T_DataArray]): __slots__ = ("data_array",) - def __init__(self, data_array: DataArray): + def __init__(self, data_array: T_DataArray): self.data_array = data_array - def __getitem__(self, key) -> DataArray: + def __getitem__(self, key) -> T_DataArray: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) @@ -462,12 +471,12 @@ def __init__( @classmethod def _construct_direct( - cls: type[T_DataArray], + cls, variable: Variable, coords: dict[Any, Variable], name: Hashable, indexes: dict[Hashable, Index], - ) -> T_DataArray: + ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ @@ -480,12 +489,12 @@ def _construct_direct( return obj def _replace( - self: T_DataArray, + self, variable: Variable | None = None, coords=None, name: Hashable | None | Default = _default, indexes=None, - ) -> T_DataArray: + ) -> Self: if variable is None: variable = self.variable if coords is None: @@ -497,10 +506,10 @@ def _replace( return type(self)(variable, coords, name=name, indexes=indexes, fastpath=True) def _replace_maybe_drop_dims( - self: T_DataArray, + self, variable: Variable, name: Hashable | None | Default = _default, - ) -> T_DataArray: + ) -> Self: if 
variable.dims == self.dims and variable.shape == self.shape: coords = self._coords.copy() indexes = self._indexes @@ -522,12 +531,12 @@ def _replace_maybe_drop_dims( return self._replace(variable, coords, name, indexes=indexes) def _overwrite_indexes( - self: T_DataArray, + self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, drop_coords: list[Hashable] | None = None, rename_dims: Mapping[Any, Any] | None = None, - ) -> T_DataArray: + ) -> Self: """Maybe replace indexes and their corresponding coordinates.""" if not indexes: return self @@ -560,8 +569,8 @@ def _to_temp_dataset(self) -> Dataset: return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False) def _from_temp_dataset( - self: T_DataArray, dataset: Dataset, name: Hashable | None | Default = _default - ) -> T_DataArray: + self, dataset: Dataset, name: Hashable | None | Default = _default + ) -> Self: variable = dataset._variables.pop(_THIS_ARRAY) coords = dataset._variables indexes = dataset._indexes @@ -773,7 +782,7 @@ def to_numpy(self) -> np.ndarray: """ return self.variable.to_numpy() - def as_numpy(self: T_DataArray) -> T_DataArray: + def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a DataArray. @@ -828,7 +837,7 @@ def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]: key = indexing.expanded_indexer(key, self.ndim) return dict(zip(self.dims, key)) - def _getitem_coord(self: T_DataArray, key: Any) -> T_DataArray: + def _getitem_coord(self, key: Any) -> Self: from xarray.core.dataset import _get_virtual_variable try: @@ -839,7 +848,7 @@ def _getitem_coord(self: T_DataArray, key: Any) -> T_DataArray: return self._replace_maybe_drop_dims(var, name=key) - def __getitem__(self: T_DataArray, key: Any) -> T_DataArray: + def __getitem__(self, key: Any) -> Self: if isinstance(key, str): return self._getitem_coord(key) else: @@ -909,7 +918,7 @@ def encoding(self) -> dict[Any, Any]: def encoding(self, value: Mapping[Any, Any]) -> None: self.variable.encoding = dict(value) - def reset_encoding(self: T_DataArray) -> T_DataArray: + def reset_encoding(self) -> Self: """Return a new DataArray without encoding on the array or any attached coords.""" ds = self._to_temp_dataset().reset_encoding() @@ -949,7 +958,7 @@ def coords(self) -> DataArrayCoordinates: @overload def reset_coords( - self: T_DataArray, + self, names: Dims = None, drop: Literal[False] = False, ) -> Dataset: @@ -957,18 +966,18 @@ def reset_coords( @overload def reset_coords( - self: T_DataArray, + self, names: Dims = None, *, drop: Literal[True], - ) -> T_DataArray: + ) -> Self: ... def reset_coords( - self: T_DataArray, + self, names: Dims = None, drop: bool = False, - ) -> T_DataArray | Dataset: + ) -> Self | Dataset: """Given names of coordinates, reset them to become variables. 
Parameters @@ -1080,15 +1089,15 @@ def __dask_postpersist__(self): func, args = self._to_temp_dataset().__dask_postpersist__() return self._dask_finalize, (self.name, func) + args - @staticmethod - def _dask_finalize(results, name, func, *args, **kwargs) -> DataArray: + @classmethod + def _dask_finalize(cls, results, name, func, *args, **kwargs) -> Self: ds = func(results, *args, **kwargs) variable = ds._variables.pop(_THIS_ARRAY) coords = ds._variables indexes = ds._indexes - return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True) + return cls(variable, coords, name=name, indexes=indexes, fastpath=True) - def load(self: T_DataArray, **kwargs) -> T_DataArray: + def load(self, **kwargs) -> Self: """Manually trigger loading of this array's data from disk or a remote source into memory and return this array. @@ -1112,7 +1121,7 @@ def load(self: T_DataArray, **kwargs) -> T_DataArray: self._coords = new._coords return self - def compute(self: T_DataArray, **kwargs) -> T_DataArray: + def compute(self, **kwargs) -> Self: """Manually trigger loading of this array's data from disk or a remote source into memory and return a new array. The original is left unaltered. @@ -1134,7 +1143,7 @@ def compute(self: T_DataArray, **kwargs) -> T_DataArray: new = self.copy(deep=False) return new.load(**kwargs) - def persist(self: T_DataArray, **kwargs) -> T_DataArray: + def persist(self, **kwargs) -> Self: """Trigger computation in constituent dask arrays This keeps them as dask arrays but encourages them to keep data in @@ -1153,7 +1162,7 @@ def persist(self: T_DataArray, **kwargs) -> T_DataArray: ds = self._to_temp_dataset().persist(**kwargs) return self._from_temp_dataset(ds) - def copy(self: T_DataArray, deep: bool = True, data: Any = None) -> T_DataArray: + def copy(self, deep: bool = True, data: Any = None) -> Self: """Returns a copy of this array. If `deep=True`, a deep copy is made of the data array. @@ -1224,11 +1233,11 @@ def copy(self: T_DataArray, deep: bool = True, data: Any = None) -> T_DataArray: return self._copy(deep=deep, data=data) def _copy( - self: T_DataArray, + self, deep: bool = True, data: Any = None, memo: dict[int, Any] | None = None, - ) -> T_DataArray: + ) -> Self: variable = self.variable._copy(deep=deep, data=data, memo=memo) indexes, index_vars = self.xindexes.copy_indexes(deep=deep) @@ -1241,12 +1250,10 @@ def _copy( return self._replace(variable, coords, indexes=indexes) - def __copy__(self: T_DataArray) -> T_DataArray: + def __copy__(self) -> Self: return self._copy(deep=False) - def __deepcopy__( - self: T_DataArray, memo: dict[int, Any] | None = None - ) -> T_DataArray: + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) # mutable objects should not be Hashable @@ -1287,7 +1294,7 @@ def chunksizes(self) -> Mapping[Any, tuple[int, ...]]: return get_chunksizes(all_variables) def chunk( - self: T_DataArray, + self, chunks: ( int | Literal["auto"] @@ -1302,7 +1309,7 @@ def chunk( chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Coerce this array's data into dask arrays with the given chunks. 
If this variable is a non-dask array, it will be converted to dask @@ -1380,12 +1387,12 @@ def chunk( return self._from_temp_dataset(ds) def isel( - self: T_DataArray, + self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Return a new DataArray whose data is given by selecting indexes along the specified dimension(s). @@ -1471,13 +1478,13 @@ def isel( return self._replace(variable=variable, coords=coords, indexes=indexes) def sel( - self: T_DataArray, + self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance=None, drop: bool = False, **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Return a new DataArray whose data is given by selecting index labels along the specified dimension(s). @@ -1590,10 +1597,10 @@ def sel( return self._from_temp_dataset(ds) def head( - self: T_DataArray, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Return a new DataArray whose data is given by the first `n` values along the specified dimension(s). Default `n` = 5 @@ -1633,10 +1640,10 @@ def head( return self._from_temp_dataset(ds) def tail( - self: T_DataArray, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Return a new DataArray whose data is given by the last `n` values along the specified dimension(s). Default `n` = 5 @@ -1680,10 +1687,10 @@ def tail( return self._from_temp_dataset(ds) def thin( - self: T_DataArray, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Return a new DataArray whose data is given by each `n` value along the specified dimension(s). @@ -1730,10 +1737,10 @@ def thin( return self._from_temp_dataset(ds) def broadcast_like( - self: T_DataArray, - other: DataArray | Dataset, + self, + other: T_DataArrayOrSet, exclude: Iterable[Hashable] | None = None, - ) -> T_DataArray: + ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1] @@ -1803,12 +1810,12 @@ def broadcast_like( dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) - return _broadcast_helper( - cast("T_DataArray", args[1]), exclude, dims_map, common_coords - ) + return _broadcast_helper(args[1], exclude, dims_map, common_coords) def _reindex_callback( - self: T_DataArray, + self, aligner: alignment.Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], @@ -1816,7 +1821,7 @@ def _reindex_callback( fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], - ) -> T_DataArray: + ) -> Self: """Callback called from ``Aligner`` to create a new reindexed DataArray.""" if isinstance(fill_value, dict): @@ -1843,13 +1848,13 @@ def _reindex_callback( return da def reindex_like( - self: T_DataArray, - other: DataArray | Dataset, + self, + other: T_DataArrayOrSet, method: ReindexMethodOptions = None, tolerance: int | float | Iterable[int | float] | None = None, copy: bool = True, fill_value=dtypes.NA, - ) -> T_DataArray: + ) -> Self: """Conform this object onto the indexes of another object, filling in missing values with ``fill_value``. The default fill value is NaN. 
@@ -2013,14 +2018,14 @@ def reindex_like( ) def reindex( - self: T_DataArray, + self, indexers: Mapping[Any, Any] | None = None, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | None = None, copy: bool = True, fill_value=dtypes.NA, **indexers_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Conform this object onto the indexes of another object, filling in missing values with ``fill_value``. The default fill value is NaN. @@ -2104,13 +2109,13 @@ def reindex( ) def interp( - self: T_DataArray, + self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, **coords_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Interpolate a DataArray onto new coordinates Performs univariate or multivariate interpolation of a DataArray onto @@ -2247,12 +2252,12 @@ def interp( return self._from_temp_dataset(ds) def interp_like( - self: T_DataArray, - other: DataArray | Dataset, + self, + other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, - ) -> T_DataArray: + ) -> Self: """Interpolate this object onto the coordinates of another object, filling out of range values with NaN. @@ -2369,13 +2374,11 @@ def interp_like( ) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def rename( self, new_name_or_name_dict: Hashable | Mapping[Any, Hashable] | None = None, **names: Hashable, - ) -> DataArray: + ) -> Self: """Returns a new DataArray with renamed coordinates, dimensions or a new name. Parameters @@ -2416,10 +2419,10 @@ def rename( return self._replace(name=new_name_or_name_dict) def swap_dims( - self: T_DataArray, + self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs, - ) -> T_DataArray: + ) -> Self: """Returns a new DataArray with swapped dimensions. Parameters @@ -2474,14 +2477,12 @@ def swap_dims( ds = self._to_temp_dataset().swap_dims(dims_dict) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def expand_dims( self, dim: None | Hashable | Sequence[Hashable] | Mapping[Any, Any] = None, axis: None | int | Sequence[int] = None, **dim_kwargs: Any, - ) -> DataArray: + ) -> Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. @@ -2570,14 +2571,12 @@ def expand_dims( ds = self._to_temp_dataset().expand_dims(dim, axis) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], - ) -> DataArray: + ) -> Self: """Set DataArray (multi-)indexes using one or more existing coordinates. @@ -2635,13 +2634,11 @@ def set_index( ds = self._to_temp_dataset().set_index(indexes, append=append, **indexes_kwargs) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def reset_index( self, dims_or_levels: Hashable | Sequence[Hashable], drop: bool = False, - ) -> DataArray: + ) -> Self: """Reset the specified index(es) or multi-index level(s). 
This legacy method is specific to pandas (multi-)indexes and @@ -2675,11 +2672,11 @@ def reset_index( return self._from_temp_dataset(ds) def set_xindex( - self: T_DataArray, + self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, - ) -> T_DataArray: + ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). @@ -2704,10 +2701,10 @@ def set_xindex( return self._from_temp_dataset(ds) def reorder_levels( - self: T_DataArray, + self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], - ) -> T_DataArray: + ) -> Self: """Rearrange index levels using input order. Parameters @@ -2730,12 +2727,12 @@ def reorder_levels( return self._from_temp_dataset(ds) def stack( - self: T_DataArray, + self, dimensions: Mapping[Any, Sequence[Hashable]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dimensions_kwargs: Sequence[Hashable], - ) -> T_DataArray: + ) -> Self: """ Stack any number of existing dimensions into a single new dimension. @@ -2802,14 +2799,12 @@ def stack( ) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def unstack( self, dim: Dims = None, fill_value: Any = dtypes.NA, sparse: bool = False, - ) -> DataArray: + ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. @@ -2933,11 +2928,11 @@ def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Data return Dataset(data_dict) def transpose( - self: T_DataArray, + self, *dims: Hashable, transpose_coords: bool = True, missing_dims: ErrorOptionsWithWarn = "raise", - ) -> T_DataArray: + ) -> Self: """Return a new DataArray object with transposed dimensions. Parameters @@ -2983,17 +2978,15 @@ def transpose( return self._replace(variable) @property - def T(self: T_DataArray) -> T_DataArray: + def T(self) -> Self: return self.transpose() - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def drop_vars( self, names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", - ) -> DataArray: + ) -> Self: """Returns an array with dropped variables. Parameters @@ -3054,11 +3047,11 @@ def drop_vars( return self._from_temp_dataset(ds) def drop_indexes( - self: T_DataArray, + self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", - ) -> T_DataArray: + ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters @@ -3079,13 +3072,13 @@ def drop_indexes( return self._from_temp_dataset(ds) def drop( - self: T_DataArray, + self, labels: Mapping[Any, Any] | None = None, dim: Hashable | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, - ) -> T_DataArray: + ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged @@ -3099,12 +3092,12 @@ def drop( return self._from_temp_dataset(ds) def drop_sel( - self: T_DataArray, + self, labels: Mapping[Any, Any] | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, - ) -> T_DataArray: + ) -> Self: """Drop index labels from this DataArray. 
Parameters @@ -3167,8 +3160,8 @@ def drop_sel( return self._from_temp_dataset(ds) def drop_isel( - self: T_DataArray, indexers: Mapping[Any, Any] | None = None, **indexers_kwargs - ) -> T_DataArray: + self, indexers: Mapping[Any, Any] | None = None, **indexers_kwargs + ) -> Self: """Drop index positions from this DataArray. Parameters @@ -3218,11 +3211,11 @@ def drop_isel( return self._from_temp_dataset(dataset) def dropna( - self: T_DataArray, + self, dim: Hashable, how: Literal["any", "all"] = "any", thresh: int | None = None, - ) -> T_DataArray: + ) -> Self: """Returns a new array with dropped labels for missing values along the provided dimension. @@ -3293,7 +3286,7 @@ def dropna( ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh) return self._from_temp_dataset(ds) - def fillna(self: T_DataArray, value: Any) -> T_DataArray: + def fillna(self, value: Any) -> Self: """Fill missing values in this object. This operation follows the normal broadcasting and alignment rules that @@ -3356,7 +3349,7 @@ def fillna(self: T_DataArray, value: Any) -> T_DataArray: return out def interpolate_na( - self: T_DataArray, + self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, @@ -3372,7 +3365,7 @@ def interpolate_na( ) = None, keep_attrs: bool | None = None, **kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters @@ -3479,9 +3472,7 @@ def interpolate_na( **kwargs, ) - def ffill( - self: T_DataArray, dim: Hashable, limit: int | None = None - ) -> T_DataArray: + def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* @@ -3565,9 +3556,7 @@ def ffill( return ffill(self, dim, limit=limit) - def bfill( - self: T_DataArray, dim: Hashable, limit: int | None = None - ) -> T_DataArray: + def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* @@ -3651,7 +3640,7 @@ def bfill( return bfill(self, dim, limit=limit) - def combine_first(self: T_DataArray, other: T_DataArray) -> T_DataArray: + def combine_first(self, other: Self) -> Self: """Combine two DataArray objects, with union of coordinates. This operation follows the normal broadcasting and alignment rules of @@ -3670,7 +3659,7 @@ def combine_first(self: T_DataArray, other: T_DataArray) -> T_DataArray: return ops.fillna(self, other, join="outer") def reduce( - self: T_DataArray, + self, func: Callable[..., Any], dim: Dims = None, *, @@ -3678,7 +3667,7 @@ def reduce( keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Reduce this array by applying `func` along some dimension(s). Parameters @@ -3716,7 +3705,7 @@ def reduce( var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs) return self._replace_maybe_drop_dims(var) - def to_pandas(self) -> DataArray | pd.Series | pd.DataFrame: + def to_pandas(self) -> Self | pd.Series | pd.DataFrame: """Convert this array into a pandas object with the same shape. 
The type of the returned object depends on the number of DataArray @@ -4270,7 +4259,7 @@ def to_dict( return d @classmethod - def from_dict(cls: type[T_DataArray], d: Mapping[str, Any]) -> T_DataArray: + def from_dict(cls, d: Mapping[str, Any]) -> Self: """Convert a dictionary into an xarray.DataArray Parameters @@ -4387,7 +4376,7 @@ def to_cdms2(self) -> cdms2_Variable: return to_cdms2(self) @classmethod - def from_cdms2(cls, variable: cdms2_Variable) -> DataArray: + def from_cdms2(cls, variable: cdms2_Variable) -> Self: """Convert a cdms2.Variable into an xarray.DataArray .. deprecated:: 2023.06.0 @@ -4414,13 +4403,13 @@ def to_iris(self) -> iris_Cube: return to_iris(self) @classmethod - def from_iris(cls, cube: iris_Cube) -> DataArray: + def from_iris(cls, cube: iris_Cube) -> Self: """Convert a iris.cube.Cube into an xarray.DataArray""" from xarray.convert import from_iris return from_iris(cube) - def _all_compat(self: T_DataArray, other: T_DataArray, compat_str: str) -> bool: + def _all_compat(self, other: Self, compat_str: str) -> bool: """Helper function for equals, broadcast_equals, and identical""" def compat(x, y): @@ -4430,7 +4419,7 @@ def compat(x, y): self, other ) - def broadcast_equals(self: T_DataArray, other: T_DataArray) -> bool: + def broadcast_equals(self, other: Self) -> bool: """Two DataArrays are broadcast equal if they are equal after broadcasting them against each other such that they have the same dimensions. @@ -4479,7 +4468,7 @@ def broadcast_equals(self: T_DataArray, other: T_DataArray) -> bool: except (TypeError, AttributeError): return False - def equals(self: T_DataArray, other: T_DataArray) -> bool: + def equals(self, other: Self) -> bool: """True if two DataArrays have the same dimensions, coordinates and values; otherwise False. @@ -4541,7 +4530,7 @@ def equals(self: T_DataArray, other: T_DataArray) -> bool: except (TypeError, AttributeError): return False - def identical(self: T_DataArray, other: T_DataArray) -> bool: + def identical(self, other: Self) -> bool: """Like equals, but also checks the array name and attributes, and attributes on all coordinates. 
@@ -4608,19 +4597,19 @@ def _result_name(self, other: Any = None) -> Hashable | None: else: return None - def __array_wrap__(self: T_DataArray, obj, context=None) -> T_DataArray: + def __array_wrap__(self, obj, context=None) -> Self: new_var = self.variable.__array_wrap__(obj, context) return self._replace(new_var) - def __matmul__(self: T_DataArray, obj: T_DataArray) -> T_DataArray: + def __matmul__(self, obj: T_Xarray) -> T_Xarray: return self.dot(obj) - def __rmatmul__(self: T_DataArray, other: T_DataArray) -> T_DataArray: + def __rmatmul__(self, other: T_Xarray) -> T_Xarray: # currently somewhat duplicative, as only other DataArrays are # compatible with matmul return computation.dot(other, self) - def _unary_op(self: T_DataArray, f: Callable, *args, **kwargs) -> T_DataArray: + def _unary_op(self, f: Callable, *args, **kwargs) -> Self: keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) @@ -4636,18 +4625,18 @@ def _unary_op(self: T_DataArray, f: Callable, *args, **kwargs) -> T_DataArray: return da def _binary_op( - self: T_DataArray, - other: Any, + self, + other: T_Xarray, f: Callable, reflexive: bool = False, - ) -> T_DataArray: + ) -> T_Xarray: from xarray.core.groupby import GroupBy if isinstance(other, (Dataset, GroupBy)): return NotImplemented if isinstance(other, DataArray): align_type = OPTIONS["arithmetic_join"] - self, other = align(self, other, join=align_type, copy=False) # type: ignore + self, other = align(self, other, join=align_type, copy=False) other_variable = getattr(other, "variable", other) other_coords = getattr(other, "coords", None) @@ -4661,7 +4650,7 @@ def _binary_op( return self._replace(variable, coords, name, indexes=indexes) - def _inplace_binary_op(self: T_DataArray, other: Any, f: Callable) -> T_DataArray: + def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): @@ -4721,11 +4710,11 @@ def _title_for_slice(self, truncate: int = 50) -> str: return title def diff( - self: T_DataArray, + self, dim: Hashable, n: int = 1, label: Literal["upper", "lower"] = "upper", - ) -> T_DataArray: + ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters @@ -4771,11 +4760,11 @@ def diff( return self._from_temp_dataset(ds) def shift( - self: T_DataArray, + self, shifts: Mapping[Any, int] | None = None, fill_value: Any = dtypes.NA, **shifts_kwargs: int, - ) -> T_DataArray: + ) -> Self: """Shift this DataArray by an offset along one or more dimensions. Only the data is moved; coordinates stay in place. This is consistent @@ -4821,11 +4810,11 @@ def shift( return self._replace(variable=variable) def roll( - self: T_DataArray, + self, shifts: Mapping[Hashable, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, - ) -> T_DataArray: + ) -> Self: """Roll this array by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not @@ -4870,7 +4859,7 @@ def roll( return self._from_temp_dataset(ds) @property - def real(self: T_DataArray) -> T_DataArray: + def real(self) -> Self: """ The real part of the array. @@ -4881,7 +4870,7 @@ def real(self: T_DataArray) -> T_DataArray: return self._replace(self.variable.real) @property - def imag(self: T_DataArray) -> T_DataArray: + def imag(self) -> Self: """ The imaginary part of the array. 
@@ -4892,10 +4881,10 @@ def imag(self: T_DataArray) -> T_DataArray: return self._replace(self.variable.imag) def dot( - self: T_DataArray, - other: T_DataArray, + self, + other: T_Xarray, dims: Dims = None, - ) -> T_DataArray: + ) -> T_Xarray: """Perform dot product of two DataArrays along their shared dims. Equivalent to taking taking tensordot over all shared dims. @@ -4945,13 +4934,11 @@ def dot( return computation.dot(self, other, dims=dims) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def sortby( self, variables: Hashable | DataArray | Sequence[Hashable | DataArray], ascending: bool = True, - ) -> DataArray: + ) -> Self: """Sort object by labels or values (along an axis). Sorts the dataarray, either along specified dimensions, @@ -5012,14 +4999,14 @@ def sortby( return self._from_temp_dataset(ds) def quantile( - self: T_DataArray, + self, q: ArrayLike, dim: Dims = None, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, - ) -> T_DataArray: + ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. @@ -5130,11 +5117,11 @@ def quantile( return self._from_temp_dataset(ds) def rank( - self: T_DataArray, + self, dim: Hashable, pct: bool = False, keep_attrs: bool | None = None, - ) -> T_DataArray: + ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that @@ -5174,11 +5161,11 @@ def rank( return self._from_temp_dataset(ds) def differentiate( - self: T_DataArray, + self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions = None, - ) -> T_DataArray: + ) -> Self: """ Differentiate the array with the second order accurate central differences. @@ -5236,13 +5223,11 @@ def differentiate( ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, - ) -> DataArray: + ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: @@ -5292,13 +5277,11 @@ def integrate( ds = self._to_temp_dataset().integrate(coord, datetime_unit) return self._from_temp_dataset(ds) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def cumulative_integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, - ) -> DataArray: + ) -> Self: """Integrate cumulatively along the given coordinate using the trapezoidal rule. .. note:: @@ -5356,7 +5339,7 @@ def cumulative_integrate( ds = self._to_temp_dataset().cumulative_integrate(coord, datetime_unit) return self._from_temp_dataset(ds) - def unify_chunks(self) -> DataArray: + def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this DataArray. 
Returns @@ -5541,7 +5524,7 @@ def polyfit( ) def pad( - self: T_DataArray, + self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: int @@ -5556,7 +5539,7 @@ def pad( reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, - ) -> T_DataArray: + ) -> Self: """Pad this array along one or more dimensions. .. warning:: @@ -5714,7 +5697,7 @@ def idxmin( skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, - ) -> DataArray: + ) -> Self: """Return the coordinate label of the minimum value along a dimension. Returns a new `DataArray` named after the dimension with the values of @@ -5810,7 +5793,7 @@ def idxmax( skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, - ) -> DataArray: + ) -> Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `DataArray` named after the dimension with the values of @@ -5900,15 +5883,13 @@ def idxmax( keep_attrs=keep_attrs, ) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def argmin( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, - ) -> DataArray | dict[Hashable, DataArray]: + ) -> Self | dict[Hashable, Self]: """Index or indices of the minimum of the DataArray over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of DataArrays, @@ -6002,15 +5983,13 @@ def argmin( else: return self._replace_maybe_drop_dims(result) - # change type of self and return to T_DataArray once - # https://github.com/python/mypy/issues/12846 is resolved def argmax( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, - ) -> DataArray | dict[Hashable, DataArray]: + ) -> Self | dict[Hashable, Self]: """Index or indices of the maximum of the DataArray over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of DataArrays, @@ -6352,10 +6331,10 @@ def curvefit( ) def drop_duplicates( - self: T_DataArray, + self, dim: Hashable | Iterable[Hashable], keep: Literal["first", "last", False] = "first", - ) -> T_DataArray: + ) -> Self: """Returns a new DataArray with duplicate dimension values removed. Parameters @@ -6437,7 +6416,7 @@ def convert_calendar( align_on: str | None = None, missing: Any | None = None, use_cftime: bool | None = None, - ) -> DataArray: + ) -> Self: """Convert the DataArray to another calendar. Only converts the individual timestamps, does not modify any data except @@ -6557,7 +6536,7 @@ def interp_calendar( self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: str = "time", - ) -> DataArray: + ) -> Self: """Interpolates the DataArray to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 48e25f7e1c7..9d771f0390c 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -93,7 +93,7 @@ is_duck_array, is_duck_dask_array, ) -from xarray.core.types import QuantileMethods, T_Dataset +from xarray.core.types import QuantileMethods, Self, T_DataArrayOrSet, T_Dataset from xarray.core.utils import ( Default, Frozen, @@ -698,11 +698,11 @@ def __init__( # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. 
Mapping # related to https://github.com/python/mypy/issues/9319? - def __eq__(self: T_Dataset, other: DsCompatible) -> T_Dataset: # type: ignore[override] + def __eq__(self, other: DsCompatible) -> Self: # type: ignore[override] return super().__eq__(other) @classmethod - def load_store(cls: type[T_Dataset], store, decoder=None) -> T_Dataset: + def load_store(cls, store, decoder=None) -> Self: """Create a new dataset from the contents of a backends.*DataStore object """ @@ -746,7 +746,7 @@ def encoding(self) -> dict[Any, Any]: def encoding(self, value: Mapping[Any, Any]) -> None: self._encoding = dict(value) - def reset_encoding(self: T_Dataset) -> T_Dataset: + def reset_encoding(self) -> Self: """Return a new Dataset without encoding on the dataset or any of its variables/coords.""" variables = {k: v.reset_encoding() for k, v in self.variables.items()} @@ -802,7 +802,7 @@ def dtypes(self) -> Frozen[Hashable, np.dtype]: } ) - def load(self: T_Dataset, **kwargs) -> T_Dataset: + def load(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this dataset's data from disk or a remote source into memory and return this dataset. Unlike compute, the original dataset is modified and returned. @@ -902,7 +902,7 @@ def __dask_postcompute__(self): def __dask_postpersist__(self): return self._dask_postpersist, () - def _dask_postcompute(self: T_Dataset, results: Iterable[Variable]) -> T_Dataset: + def _dask_postcompute(self, results: Iterable[Variable]) -> Self: import dask variables = {} @@ -925,8 +925,8 @@ def _dask_postcompute(self: T_Dataset, results: Iterable[Variable]) -> T_Dataset ) def _dask_postpersist( - self: T_Dataset, dsk: Mapping, *, rename: Mapping[str, str] | None = None - ) -> T_Dataset: + self, dsk: Mapping, *, rename: Mapping[str, str] | None = None + ) -> Self: from dask import is_dask_collection from dask.highlevelgraph import HighLevelGraph from dask.optimization import cull @@ -975,7 +975,7 @@ def _dask_postpersist( self._close, ) - def compute(self: T_Dataset, **kwargs) -> T_Dataset: + def compute(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this dataset's data from disk or a remote source into memory and return a new dataset. Unlike load, the original dataset is left unaltered. 
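A quick sketch of how the three dask-facing methods annotated above relate, assuming dask is installed::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"a": ("x", np.arange(4))}).chunk({"x": 2})  # dask-backed

    computed = ds.compute()   # new, fully in-memory dataset; ds stays lazy
    persisted = ds.persist()  # triggers computation but keeps dask arrays
    ds.load()                 # loads in place and returns the same object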
@@ -997,7 +997,7 @@ def compute(self: T_Dataset, **kwargs) -> T_Dataset: new = self.copy(deep=False) return new.load(**kwargs) - def _persist_inplace(self: T_Dataset, **kwargs) -> T_Dataset: + def _persist_inplace(self, **kwargs) -> Self: """Persist all Dask arrays in memory""" # access .data to coerce everything to numpy or dask arrays lazy_data = { @@ -1014,7 +1014,7 @@ def _persist_inplace(self: T_Dataset, **kwargs) -> T_Dataset: return self - def persist(self: T_Dataset, **kwargs) -> T_Dataset: + def persist(self, **kwargs) -> Self: """Trigger computation, keeping data as dask arrays This operation can be used to trigger computation on underlying dask @@ -1037,7 +1037,7 @@ def persist(self: T_Dataset, **kwargs) -> T_Dataset: @classmethod def _construct_direct( - cls: type[T_Dataset], + cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int] | None = None, @@ -1045,7 +1045,7 @@ def _construct_direct( indexes: dict[Any, Index] | None = None, encoding: dict | None = None, close: Callable[[], None] | None = None, - ) -> T_Dataset: + ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ @@ -1064,7 +1064,7 @@ def _construct_direct( return obj def _replace( - self: T_Dataset, + self, variables: dict[Hashable, Variable] | None = None, coord_names: set[Hashable] | None = None, dims: dict[Any, int] | None = None, @@ -1072,7 +1072,7 @@ def _replace( indexes: dict[Hashable, Index] | None = None, encoding: dict | None | Default = _default, inplace: bool = False, - ) -> T_Dataset: + ) -> Self: """Fastpath constructor for internal use. Returns an object with optionally with replaced attributes. @@ -1114,13 +1114,13 @@ def _replace( return obj def _replace_with_new_dims( - self: T_Dataset, + self, variables: dict[Hashable, Variable], coord_names: set | None = None, attrs: dict[Hashable, Any] | None | Default = _default, indexes: dict[Hashable, Index] | None = None, inplace: bool = False, - ) -> T_Dataset: + ) -> Self: """Replace variables with recalculated dimensions.""" dims = calculate_dimensions(variables) return self._replace( @@ -1128,13 +1128,13 @@ def _replace_with_new_dims( ) def _replace_vars_and_dims( - self: T_Dataset, + self, variables: dict[Hashable, Variable], coord_names: set | None = None, dims: dict[Hashable, int] | None = None, attrs: dict[Hashable, Any] | None | Default = _default, inplace: bool = False, - ) -> T_Dataset: + ) -> Self: """Deprecated version of _replace_with_new_dims(). Unlike _replace_with_new_dims(), this method always recalculates @@ -1147,13 +1147,13 @@ def _replace_vars_and_dims( ) def _overwrite_indexes( - self: T_Dataset, + self, indexes: Mapping[Hashable, Index], variables: Mapping[Hashable, Variable] | None = None, drop_variables: list[Hashable] | None = None, drop_indexes: list[Hashable] | None = None, rename_dims: Mapping[Hashable, Hashable] | None = None, - ) -> T_Dataset: + ) -> Self: """Maybe replace indexes. This function may do a lot more depending on index query @@ -1221,8 +1221,8 @@ def _overwrite_indexes( return replaced def copy( - self: T_Dataset, deep: bool = False, data: Mapping[Any, ArrayLike] | None = None - ) -> T_Dataset: + self, deep: bool = False, data: Mapping[Any, ArrayLike] | None = None + ) -> Self: """Returns a copy of this dataset. If `deep=True`, a deep copy is made of each of the component variables. 
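The shallow/deep distinction documented above, in short (standard `copy` behavior)::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"a": ("x", np.array([1.0, 2.0]))})

    shallow = ds.copy()    # new container, but the array data is shared...
    shallow["a"][0] = 9.0  # ...so this write is visible through ds as well

    deep = ds.copy(deep=True)                  # fully independent data
    swapped = ds.copy(data={"a": [7.0, 8.0]})  # same structure, new values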
@@ -1322,11 +1322,11 @@ def copy( return self._copy(deep=deep, data=data) def _copy( - self: T_Dataset, + self, deep: bool = False, data: Mapping[Any, ArrayLike] | None = None, memo: dict[int, Any] | None = None, - ) -> T_Dataset: + ) -> Self: if data is None: data = {} elif not utils.is_dict_like(data): @@ -1364,13 +1364,13 @@ def _copy( return self._replace(variables, indexes=indexes, attrs=attrs, encoding=encoding) - def __copy__(self: T_Dataset) -> T_Dataset: + def __copy__(self) -> Self: return self._copy(deep=False) - def __deepcopy__(self: T_Dataset, memo: dict[int, Any] | None = None) -> T_Dataset: + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) - def as_numpy(self: T_Dataset) -> T_Dataset: + def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a Dataset. @@ -1382,7 +1382,7 @@ def as_numpy(self: T_Dataset) -> T_Dataset: numpy_variables = {k: v.as_numpy() for k, v in self.variables.items()} return self._replace(variables=numpy_variables) - def _copy_listed(self: T_Dataset, names: Iterable[Hashable]) -> T_Dataset: + def _copy_listed(self, names: Iterable[Hashable]) -> Self: """Create a new Dataset with the listed variables from this dataset and the all relevant coordinates. Skips all validation. """ @@ -1495,7 +1495,7 @@ def nbytes(self) -> int: return sum(v.nbytes for v in self.variables.values()) @property - def loc(self: T_Dataset) -> _LocIndexer[T_Dataset]: + def loc(self) -> _LocIndexer[Self]: """Attribute for location based indexing. Only supports __getitem__, and only when the key is a dict of the form {dim: labels}. """ @@ -1507,12 +1507,12 @@ def __getitem__(self, key: Hashable) -> DataArray: # Mapping is Iterable @overload - def __getitem__(self: T_Dataset, key: Iterable[Hashable]) -> T_Dataset: + def __getitem__(self, key: Iterable[Hashable]) -> Self: ... def __getitem__( - self: T_Dataset, key: Mapping[Any, Any] | Hashable | Iterable[Hashable] - ) -> T_Dataset | DataArray: + self, key: Mapping[Any, Any] | Hashable | Iterable[Hashable] + ) -> Self | DataArray: """Access variables or coordinates of this dataset as a :py:class:`~xarray.DataArray` or a subset of variables or a indexed dataset. @@ -1677,7 +1677,7 @@ def __delitem__(self, key: Hashable) -> None: # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore[assignment] - def _all_compat(self, other: Dataset, compat_str: str) -> bool: + def _all_compat(self, other: Self, compat_str: str) -> bool: """Helper function for equals and identical""" # some stores (e.g., scipy) do not seem to preserve order, so don't @@ -1689,7 +1689,7 @@ def compat(x: Variable, y: Variable) -> bool: self._variables, other._variables, compat=compat ) - def broadcast_equals(self, other: Dataset) -> bool: + def broadcast_equals(self, other: Self) -> bool: """Two Datasets are broadcast equal if they are equal after broadcasting all variables against each other. @@ -1756,7 +1756,7 @@ def broadcast_equals(self, other: Dataset) -> bool: except (TypeError, AttributeError): return False - def equals(self, other: Dataset) -> bool: + def equals(self, other: Self) -> bool: """Two Datasets are equal if they have matching variables and coordinates, all of which are equal. 
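The comparison methods above now take `other: Self`; their graded strictness in brief::

    import xarray as xr

    ds1 = xr.Dataset({"a": ("x", [1.0, 2.0])})
    ds2 = ds1.copy()
    ds2.attrs["units"] = "m"

    ds1.equals(ds2)     # True:  attributes are ignored by equals()
    ds1.identical(ds2)  # False: identical() also compares attributes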
@@ -1837,7 +1837,7 @@ def equals(self, other: Dataset) -> bool: except (TypeError, AttributeError): return False - def identical(self, other: Dataset) -> bool: + def identical(self, other: Self) -> bool: """Like equals, but also checks all dataset attributes and the attributes on all variables and coordinates. @@ -1950,7 +1950,7 @@ def data_vars(self) -> DataVariables: """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self) - def set_coords(self: T_Dataset, names: Hashable | Iterable[Hashable]) -> T_Dataset: + def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: """Given names of one or more variables, set them as coordinates Parameters @@ -2008,10 +2008,10 @@ def set_coords(self: T_Dataset, names: Hashable | Iterable[Hashable]) -> T_Datas return obj def reset_coords( - self: T_Dataset, + self, names: Dims = None, drop: bool = False, - ) -> T_Dataset: + ) -> Self: """Given names of coordinates, reset them to become variables Parameters @@ -2562,7 +2562,7 @@ def chunksizes(self) -> Mapping[Hashable, tuple[int, ...]]: return get_chunksizes(self.variables.values()) def chunk( - self: T_Dataset, + self, chunks: ( int | Literal["auto"] | Mapping[Any, None | int | str | tuple[int, ...]] ) = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667) @@ -2573,7 +2573,7 @@ def chunk( chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: None | int | str | tuple[int, ...], - ) -> T_Dataset: + ) -> Self: """Coerce all arrays in this dataset into dask arrays with the given chunks. @@ -2767,12 +2767,12 @@ def _get_indexers_coords_and_indexes(self, indexers): return attached_coords, attached_indexes def isel( - self: T_Dataset, + self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with each array indexed along the specified dimension(s). @@ -2915,12 +2915,12 @@ def isel( ) def _isel_fancy( - self: T_Dataset, + self, indexers: Mapping[Any, Any], *, drop: bool, missing_dims: ErrorOptionsWithWarn = "raise", - ) -> T_Dataset: + ) -> Self: valid_indexers = dict(self._validate_indexers(indexers, missing_dims)) variables: dict[Hashable, Variable] = {} @@ -2956,13 +2956,13 @@ def _isel_fancy( return self._replace_with_new_dims(variables, coord_names, indexes=indexes) def sel( - self: T_Dataset, + self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, drop: bool = False, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with each array indexed by tick labels along the specified dimension(s). @@ -3042,10 +3042,10 @@ def sel( return result._overwrite_indexes(*query_results.as_tuple()[1:]) def head( - self: T_Dataset, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with the first `n` values of each array for the specified dimension(s). @@ -3132,10 +3132,10 @@ def head( return self.isel(indexers_slices) def tail( - self: T_Dataset, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with the last `n` values of each array for the specified dimension(s). 
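`head` and `tail` are thin wrappers over positional slicing; for instance::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"a": ("x", np.arange(10))})

    ds.head(x=3)            # first three values along x
    ds.tail(x=3)            # last three values along x
    ds.isel(x=slice(0, 3))  # the equivalent explicit isel() call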
@@ -3223,10 +3223,10 @@ def tail( return self.isel(indexers_slices) def thin( - self: T_Dataset, + self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with each array indexed along every `n`-th value for the specified dimension(s) @@ -3308,10 +3308,10 @@ def thin( return self.isel(indexers_slices) def broadcast_like( - self: T_Dataset, - other: Dataset | DataArray, + self, + other: T_DataArrayOrSet, exclude: Iterable[Hashable] | None = None, - ) -> T_Dataset: + ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1] @@ -3331,12 +3331,10 @@ def broadcast_like( dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) - return _broadcast_helper( - cast("T_Dataset", args[1]), exclude, dims_map, common_coords - ) + return _broadcast_helper(args[1], exclude, dims_map, common_coords) def _reindex_callback( - self: T_Dataset, + self, aligner: alignment.Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], @@ -3344,7 +3342,7 @@ def _reindex_callback( fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], - ) -> T_Dataset: + ) -> Self: """Callback called from ``Aligner`` to create a new reindexed Dataset.""" new_variables = variables.copy() @@ -3397,13 +3395,13 @@ def _reindex_callback( return reindexed def reindex_like( - self: T_Dataset, - other: Dataset | DataArray, + self, + other: T_Xarray, method: ReindexMethodOptions = None, tolerance: int | float | Iterable[int | float] | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, - ) -> T_Dataset: + ) -> Self: """Conform this object onto the indexes of another object, filling in missing values with ``fill_value``. The default fill value is NaN. @@ -3463,14 +3461,14 @@ def reindex_like( ) def reindex( - self: T_Dataset, + self, indexers: Mapping[Any, Any] | None = None, method: ReindexMethodOptions = None, tolerance: int | float | Iterable[int | float] | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Conform this object onto a new set of indexes, filling in missing values with ``fill_value``. The default fill value is NaN. @@ -3679,7 +3677,7 @@ def reindex( ) def _reindex( - self: T_Dataset, + self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, @@ -3687,7 +3685,7 @@ def _reindex( fill_value: Any = xrdtypes.NA, sparse: bool = False, **indexers_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """ Same as reindex but supports sparse option. 
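A short example of the `fill_value` behavior shared by `reindex` and `reindex_like` (public API, unchanged by this patch)::

    import xarray as xr

    ds = xr.Dataset({"a": ("x", [10, 20])}, coords={"x": [0, 1]})

    ds.reindex(x=[0, 1, 2])                # missing label 2 becomes NaN
    ds.reindex(x=[0, 1, 2], fill_value=0)  # ...or any chosen fill value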
""" @@ -3703,14 +3701,14 @@ def _reindex( ) def interp( - self: T_Dataset, + self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", **coords_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Interpolate a Dataset onto new coordinates Performs univariate or multivariate interpolation of a Dataset onto @@ -3983,12 +3981,12 @@ def _validate_interp_indexer(x, new_x): def interp_like( self, - other: Dataset | DataArray, + other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", - ) -> Dataset: + ) -> Self: """Interpolate this object onto the coordinates of another object, filling the out of range values with NaN. @@ -4138,10 +4136,10 @@ def _rename_all( return variables, coord_names, dims, indexes def _rename( - self: T_Dataset, + self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, - ) -> T_Dataset: + ) -> Self: """Also used internally by DataArray so that the warning (if any) is raised at the right stack level. """ @@ -4180,10 +4178,10 @@ def _rename( return self._replace(variables, coord_names, dims=dims, indexes=indexes) def rename( - self: T_Dataset, + self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, - ) -> T_Dataset: + ) -> Self: """Returns a new object with renamed variables, coordinates and dimensions. Parameters @@ -4210,10 +4208,10 @@ def rename( return self._rename(name_dict=name_dict, **names) def rename_dims( - self: T_Dataset, + self, dims_dict: Mapping[Any, Hashable] | None = None, **dims: Hashable, - ) -> T_Dataset: + ) -> Self: """Returns a new object with renamed dimensions only. Parameters @@ -4257,10 +4255,10 @@ def rename_dims( return self._replace(variables, coord_names, dims=sizes, indexes=indexes) def rename_vars( - self: T_Dataset, + self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, - ) -> T_Dataset: + ) -> Self: """Returns a new object with renamed variables including coordinates Parameters @@ -4297,8 +4295,8 @@ def rename_vars( return self._replace(variables, coord_names, dims=dims, indexes=indexes) def swap_dims( - self: T_Dataset, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs - ) -> T_Dataset: + self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs + ) -> Self: """Returns a new object with swapped dimensions. Parameters @@ -4401,14 +4399,12 @@ def swap_dims( return self._replace_with_new_dims(variables, coord_names, indexes=indexes) - # change type of self and return to T_Dataset once - # https://github.com/python/mypy/issues/12846 is resolved def expand_dims( self, dim: None | Hashable | Sequence[Hashable] | Mapping[Any, Any] = None, axis: None | int | Sequence[int] = None, **dim_kwargs: Any, - ) -> Dataset: + ) -> Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. 
@@ -4598,14 +4594,12 @@ def expand_dims( variables, coord_names=coord_names, indexes=indexes ) - # change type of self and return to T_Dataset once - # https://github.com/python/mypy/issues/12846 is resolved def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], - ) -> Dataset: + ) -> Self: """Set Dataset (multi-)indexes using one or more existing coordinates or variables. @@ -4766,10 +4760,10 @@ def set_index( ) def reset_index( - self: T_Dataset, + self, dims_or_levels: Hashable | Sequence[Hashable], drop: bool = False, - ) -> T_Dataset: + ) -> Self: """Reset the specified index(es) or multi-index level(s). This legacy method is specific to pandas (multi-)indexes and @@ -4877,11 +4871,11 @@ def drop_or_convert(var_names): ) def set_xindex( - self: T_Dataset, + self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, - ) -> T_Dataset: + ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). @@ -4989,10 +4983,10 @@ def set_xindex( ) def reorder_levels( - self: T_Dataset, + self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], - ) -> T_Dataset: + ) -> Self: """Rearrange index levels using input order. Parameters @@ -5093,12 +5087,12 @@ def _get_stack_index( return stack_index, stack_coords def _stack_once( - self: T_Dataset, + self, dims: Sequence[Hashable | ellipsis], new_dim: Hashable, index_cls: type[Index], create_index: bool | None = True, - ) -> T_Dataset: + ) -> Self: if dims == ...: raise ValueError("Please use [...] for dims, rather than just ...") if ... in dims: @@ -5152,12 +5146,12 @@ def _stack_once( ) def stack( - self: T_Dataset, + self, dimensions: Mapping[Any, Sequence[Hashable | ellipsis]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dimensions_kwargs: Sequence[Hashable | ellipsis], - ) -> T_Dataset: + ) -> Self: """ Stack any number of existing dimensions into a single new dimension. @@ -5312,12 +5306,12 @@ def stack_dataarray(da): return data_array def _unstack_once( - self: T_Dataset, + self, dim: Hashable, index_and_vars: tuple[Index, dict[Hashable, Variable]], fill_value, sparse: bool = False, - ) -> T_Dataset: + ) -> Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} @@ -5352,12 +5346,12 @@ def _unstack_once( ) def _unstack_full_reindex( - self: T_Dataset, + self, dim: Hashable, index_and_vars: tuple[Index, dict[Hashable, Variable]], fill_value, sparse: bool, - ) -> T_Dataset: + ) -> Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} @@ -5403,11 +5397,11 @@ def _unstack_full_reindex( ) def unstack( - self: T_Dataset, + self, dim: Dims = None, fill_value: Any = xrdtypes.NA, sparse: bool = False, - ) -> T_Dataset: + ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. @@ -5504,7 +5498,7 @@ def unstack( result = result._unstack_once(d, stacked_indexes[d], fill_value, sparse) return result - def update(self: T_Dataset, other: CoercibleMapping) -> T_Dataset: + def update(self, other: CoercibleMapping) -> Self: """Update this dataset's variables with those from another dataset. Just like :py:meth:`dict.update` this is a in-place operation. 
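The `stack`/`unstack` pair annotated above round-trips through a pandas MultiIndex; a minimal example::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"a": (("x", "y"), np.arange(6).reshape(2, 3))})

    stacked = ds.stack(z=("x", "y"))  # 'z' becomes a MultiIndex dimension
    roundtrip = stacked.unstack("z")  # recovers the original x/y dimensions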
@@ -5544,14 +5538,14 @@ def update(self: T_Dataset, other: CoercibleMapping) -> T_Dataset: return self._replace(inplace=True, **merge_result._asdict()) def merge( - self: T_Dataset, + self, other: CoercibleMapping | DataArray, overwrite_vars: Hashable | Iterable[Hashable] = frozenset(), compat: CompatOptions = "no_conflicts", join: JoinOptions = "outer", fill_value: Any = xrdtypes.NA, combine_attrs: CombineAttrsOptions = "override", - ) -> T_Dataset: + ) -> Self: """Merge the arrays of two datasets into a single dataset. This method generally does not allow for overriding data, with the @@ -5655,11 +5649,11 @@ def _assert_all_in_dataset( ) def drop_vars( - self: T_Dataset, + self, names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", - ) -> T_Dataset: + ) -> Self: """Drop variables from this dataset. Parameters @@ -5801,11 +5795,11 @@ def drop_vars( ) def drop_indexes( - self: T_Dataset, + self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", - ) -> T_Dataset: + ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters @@ -5857,13 +5851,13 @@ def drop_indexes( return self._replace(variables=variables, indexes=indexes) def drop( - self: T_Dataset, + self, labels=None, dim=None, *, errors: ErrorOptions = "raise", **labels_kwargs, - ) -> T_Dataset: + ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged @@ -5913,8 +5907,8 @@ def drop( return self.drop_sel(labels, errors=errors) def drop_sel( - self: T_Dataset, labels=None, *, errors: ErrorOptions = "raise", **labels_kwargs - ) -> T_Dataset: + self, labels=None, *, errors: ErrorOptions = "raise", **labels_kwargs + ) -> Self: """Drop index labels from this dataset. Parameters @@ -5983,7 +5977,7 @@ def drop_sel( ds = ds.loc[{dim: new_index}] return ds - def drop_isel(self: T_Dataset, indexers=None, **indexers_kwargs) -> T_Dataset: + def drop_isel(self, indexers=None, **indexers_kwargs) -> Self: """Drop index positions from this Dataset. Parameters @@ -6049,11 +6043,11 @@ def drop_isel(self: T_Dataset, indexers=None, **indexers_kwargs) -> T_Dataset: return ds def drop_dims( - self: T_Dataset, + self, drop_dims: str | Iterable[Hashable], *, errors: ErrorOptions = "raise", - ) -> T_Dataset: + ) -> Self: """Drop dimensions and associated variables from this dataset. Parameters @@ -6090,10 +6084,10 @@ def drop_dims( return self.drop_vars(drop_vars) def transpose( - self: T_Dataset, + self, *dims: Hashable, missing_dims: ErrorOptionsWithWarn = "raise", - ) -> T_Dataset: + ) -> Self: """Return a new Dataset object with all array dimensions transposed. Although the order of dimensions on each array will change, the dataset @@ -6146,12 +6140,12 @@ def transpose( return ds def dropna( - self: T_Dataset, + self, dim: Hashable, how: Literal["any", "all"] = "any", thresh: int | None = None, subset: Iterable[Hashable] | None = None, - ) -> T_Dataset: + ) -> Self: """Returns a new dataset with dropped labels for missing values along the provided dimension. @@ -6273,7 +6267,7 @@ def dropna( return self.isel({dim: mask}) - def fillna(self: T_Dataset, value: Any) -> T_Dataset: + def fillna(self, value: Any) -> Self: """Fill missing values in this object. 
This operation follows the normal broadcasting and alignment rules that @@ -6354,7 +6348,7 @@ def fillna(self: T_Dataset, value: Any) -> T_Dataset: return out def interpolate_na( - self: T_Dataset, + self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, @@ -6363,7 +6357,7 @@ def interpolate_na( int | float | str | pd.Timedelta | np.timedelta64 | datetime.timedelta ) = None, **kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters @@ -6493,7 +6487,7 @@ def interpolate_na( ) return new - def ffill(self: T_Dataset, dim: Hashable, limit: int | None = None) -> T_Dataset: + def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* @@ -6557,7 +6551,7 @@ def ffill(self: T_Dataset, dim: Hashable, limit: int | None = None) -> T_Dataset new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit) return new - def bfill(self: T_Dataset, dim: Hashable, limit: int | None = None) -> T_Dataset: + def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* @@ -6622,7 +6616,7 @@ def bfill(self: T_Dataset, dim: Hashable, limit: int | None = None) -> T_Dataset new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit) return new - def combine_first(self: T_Dataset, other: T_Dataset) -> T_Dataset: + def combine_first(self, other: Self) -> Self: """Combine two Datasets, default to data_vars of self. The new coordinates follow the normal broadcasting and alignment rules @@ -6642,7 +6636,7 @@ def combine_first(self: T_Dataset, other: T_Dataset) -> T_Dataset: return out def reduce( - self: T_Dataset, + self, func: Callable, dim: Dims = None, *, @@ -6650,7 +6644,7 @@ def reduce( keepdims: bool = False, numeric_only: bool = False, **kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Reduce this dataset by applying `func` along some dimension(s). Parameters @@ -6775,12 +6769,12 @@ def reduce( ) def map( - self: T_Dataset, + self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Apply a function to each data variable in this dataset Parameters @@ -6835,12 +6829,12 @@ def map( return type(self)(variables, attrs=attrs) def apply( - self: T_Dataset, + self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, - ) -> T_Dataset: + ) -> Self: """ Backward compatible implementation of ``map`` @@ -6856,10 +6850,10 @@ def apply( return self.map(func, keep_attrs, args, **kwargs) def assign( - self: T_Dataset, + self, variables: Mapping[Any, Any] | None = None, **variables_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Assign new data variables to a Dataset, returning a new object with all the original variables in addition to the new ones. 
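As with `pandas.DataFrame.assign`, values passed to `assign` may be arrays or callables that receive the dataset::

    import xarray as xr

    ds = xr.Dataset({"a": ("x", [1, 2, 3])})

    ds.assign(b=ds["a"] * 2)            # add a derived variable
    ds.assign(c=lambda d: d["a"] ** 2)  # callable form, evaluated against ds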
@@ -7164,9 +7158,7 @@ def _set_numpy_data_from_dataframe( self[name] = (dims, data) @classmethod - def from_dataframe( - cls: type[T_Dataset], dataframe: pd.DataFrame, sparse: bool = False - ) -> T_Dataset: + def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self: """Convert a pandas.DataFrame into an xarray.Dataset Each column will be converted into an independent variable in the @@ -7380,7 +7372,7 @@ def to_dict( return d @classmethod - def from_dict(cls: type[T_Dataset], d: Mapping[Any, Any]) -> T_Dataset: + def from_dict(cls, d: Mapping[Any, Any]) -> Self: """Convert a dictionary into an xarray.Dataset. Parameters @@ -7470,7 +7462,7 @@ def from_dict(cls: type[T_Dataset], d: Mapping[Any, Any]) -> T_Dataset: return obj - def _unary_op(self: T_Dataset, f, *args, **kwargs) -> T_Dataset: + def _unary_op(self, f, *args, **kwargs) -> Self: variables = {} keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: @@ -7501,7 +7493,7 @@ def _binary_op(self, other, f, reflexive=False, join=None) -> Dataset: ds.attrs = self.attrs return ds - def _inplace_binary_op(self: T_Dataset, other, f) -> T_Dataset: + def _inplace_binary_op(self, other, f) -> Self: from xarray.core.dataarray import DataArray from xarray.core.groupby import GroupBy @@ -7576,11 +7568,11 @@ def _copy_attrs_from(self, other): self.variables[v].attrs = other.variables[v].attrs def diff( - self: T_Dataset, + self, dim: Hashable, n: int = 1, label: Literal["upper", "lower"] = "upper", - ) -> T_Dataset: + ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters @@ -7663,11 +7655,11 @@ def diff( return difference def shift( - self: T_Dataset, + self, shifts: Mapping[Any, int] | None = None, fill_value: Any = xrdtypes.NA, **shifts_kwargs: int, - ) -> T_Dataset: + ) -> Self: """Shift this dataset by an offset along one or more dimensions. Only data variables are moved; coordinates stay in place. This is @@ -7734,11 +7726,11 @@ def shift( return self._replace(variables) def roll( - self: T_Dataset, + self, shifts: Mapping[Any, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, - ) -> T_Dataset: + ) -> Self: """Roll this dataset by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not @@ -7820,10 +7812,10 @@ def roll( return self._replace(variables, indexes=indexes) def sortby( - self: T_Dataset, + self, variables: Hashable | DataArray | list[Hashable | DataArray], ascending: bool = True, - ) -> T_Dataset: + ) -> Self: """ Sort object by labels or values (along an axis). @@ -7890,7 +7882,7 @@ def sortby( variables = variables arrays = [v if isinstance(v, DataArray) else self[v] for v in variables] aligned_vars = align(self, *arrays, join="left") # type: ignore[type-var] - aligned_self: T_Dataset = aligned_vars[0] # type: ignore[assignment] + aligned_self = cast(Self, aligned_vars[0]) aligned_other_vars: tuple[DataArray, ...] = aligned_vars[1:] # type: ignore[assignment] vars_by_dim = defaultdict(list) for data_array in aligned_other_vars: @@ -7906,7 +7898,7 @@ def sortby( return aligned_self.isel(indices) def quantile( - self: T_Dataset, + self, q: ArrayLike, dim: Dims = None, method: QuantileMethods = "linear", @@ -7914,7 +7906,7 @@ def quantile( keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, - ) -> T_Dataset: + ) -> Self: """Compute the qth quantile of the data along the specified dimension. 
Returns the qth quantiles(s) of the array elements for each variable @@ -8084,11 +8076,11 @@ def quantile( return new.assign_coords(quantile=q) def rank( - self: T_Dataset, + self, dim: Hashable, pct: bool = False, keep_attrs: bool | None = None, - ) -> T_Dataset: + ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that @@ -8142,11 +8134,11 @@ def rank( return self._replace(variables, coord_names, attrs=attrs) def differentiate( - self: T_Dataset, + self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions | None = None, - ) -> T_Dataset: + ) -> Self: """ Differentiate with the second order accurate central differences. @@ -8214,10 +8206,10 @@ def differentiate( return self._replace(variables) def integrate( - self: T_Dataset, + self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, - ) -> T_Dataset: + ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: @@ -8333,10 +8325,10 @@ def _integrate_one(self, coord, datetime_unit=None, cumulative=False): ) def cumulative_integrate( - self: T_Dataset, + self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, - ) -> T_Dataset: + ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: @@ -8408,7 +8400,7 @@ def cumulative_integrate( return result @property - def real(self: T_Dataset) -> T_Dataset: + def real(self) -> Self: """ The real part of each data variable. @@ -8419,7 +8411,7 @@ def real(self: T_Dataset) -> T_Dataset: return self.map(lambda x: x.real, keep_attrs=True) @property - def imag(self: T_Dataset) -> T_Dataset: + def imag(self) -> Self: """ The imaginary part of each data variable. @@ -8431,7 +8423,7 @@ def imag(self: T_Dataset) -> T_Dataset: plot = utils.UncachedAccessor(DatasetPlotAccessor) - def filter_by_attrs(self: T_Dataset, **kwargs) -> T_Dataset: + def filter_by_attrs(self, **kwargs) -> Self: """Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned @@ -8526,7 +8518,7 @@ def filter_by_attrs(self: T_Dataset, **kwargs) -> T_Dataset: selection.append(var_name) return self[selection] - def unify_chunks(self: T_Dataset) -> T_Dataset: + def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this Dataset. Returns @@ -8648,7 +8640,7 @@ def map_blocks( return map_blocks(func, self, args, kwargs, template) def polyfit( - self: T_Dataset, + self, dim: Hashable, deg: int, skipna: bool | None = None, @@ -8656,7 +8648,7 @@ def polyfit( w: Hashable | Any = None, full: bool = False, cov: bool | Literal["unscaled"] = False, - ) -> T_Dataset: + ) -> Self: """ Least squares polynomial fit. @@ -8844,7 +8836,7 @@ def polyfit( return type(self)(data_vars=variables, attrs=self.attrs.copy()) def pad( - self: T_Dataset, + self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: int @@ -8858,7 +8850,7 @@ def pad( reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Pad this dataset along one or more dimensions. .. 
warning:: @@ -9030,12 +9022,12 @@ def pad( return self._replace_with_new_dims(variables, indexes=indexes, attrs=attrs) def idxmin( - self: T_Dataset, + self, dim: Hashable | None = None, skipna: bool | None = None, fill_value: Any = xrdtypes.NA, keep_attrs: bool | None = None, - ) -> T_Dataset: + ) -> Self: """Return the coordinate label of the minimum value along a dimension. Returns a new `Dataset` named after the dimension with the values of @@ -9127,12 +9119,12 @@ def idxmin( ) def idxmax( - self: T_Dataset, + self, dim: Hashable | None = None, skipna: bool | None = None, fill_value: Any = xrdtypes.NA, keep_attrs: bool | None = None, - ) -> T_Dataset: + ) -> Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `Dataset` named after the dimension with the values of @@ -9223,7 +9215,7 @@ def idxmax( ) ) - def argmin(self: T_Dataset, dim: Hashable | None = None, **kwargs) -> T_Dataset: + def argmin(self, dim: Hashable | None = None, **kwargs) -> Self: """Indices of the minima of the member variables. If there are multiple minima, the indices of the first one found will be @@ -9326,7 +9318,7 @@ def argmin(self: T_Dataset, dim: Hashable | None = None, **kwargs) -> T_Dataset: "Dataset.argmin() with a sequence or ... for dim" ) - def argmax(self: T_Dataset, dim: Hashable | None = None, **kwargs) -> T_Dataset: + def argmax(self, dim: Hashable | None = None, **kwargs) -> Self: """Indices of the maxima of the member variables. If there are multiple maxima, the indices of the first one found will be @@ -9420,13 +9412,13 @@ def argmax(self: T_Dataset, dim: Hashable | None = None, **kwargs) -> T_Dataset: ) def query( - self: T_Dataset, + self, queries: Mapping[Any, Any] | None = None, parser: QueryParserOptions = "pandas", engine: QueryEngineOptions = None, missing_dims: ErrorOptionsWithWarn = "raise", **queries_kwargs: Any, - ) -> T_Dataset: + ) -> Self: """Return a new dataset with each array indexed along the specified dimension(s), where the indexers are given as strings containing Python expressions to be evaluated against the data variables in the @@ -9516,7 +9508,7 @@ def query( return self.isel(indexers, missing_dims=missing_dims) def curvefit( - self: T_Dataset, + self, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, @@ -9526,7 +9518,7 @@ def curvefit( param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, - ) -> T_Dataset: + ) -> Self: """ Curve fitting optimization for arbitrary functions. @@ -9750,10 +9742,10 @@ def _wrapper(Y, *args, **kwargs): return result def drop_duplicates( - self: T_Dataset, + self, dim: Hashable | Iterable[Hashable], keep: Literal["first", "last", False] = "first", - ) -> T_Dataset: + ) -> Self: """Returns a new Dataset with duplicate dimension values removed. Parameters @@ -9793,13 +9785,13 @@ def drop_duplicates( return self.isel(indexes) def convert_calendar( - self: T_Dataset, + self, calendar: CFCalendar, dim: Hashable = "time", align_on: Literal["date", "year", None] = None, missing: Any | None = None, use_cftime: bool | None = None, - ) -> T_Dataset: + ) -> Self: """Convert the Dataset to another calendar. 
Only converts the individual timestamps, does not modify any data except @@ -9916,10 +9908,10 @@ def convert_calendar( ) def interp_calendar( - self: T_Dataset, + self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: Hashable = "time", - ) -> T_Dataset: + ) -> Self: """Interpolates the Dataset to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 1869cf2a0bd..2571b093450 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -66,8 +66,8 @@ PadModeOptions, PadReflectOptions, QuantileMethods, + Self, T_DuckArray, - T_Variable, ) NON_NANOSECOND_WARNING = ( @@ -420,7 +420,7 @@ def _in_memory(self): ) @property - def data(self: T_Variable): + def data(self): """ The Variable's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. @@ -439,7 +439,7 @@ def data(self: T_Variable): return self.values @data.setter - def data(self: T_Variable, data: T_DuckArray | ArrayLike) -> None: + def data(self, data: T_DuckArray | ArrayLike) -> None: data = as_compatible_data(data) if data.shape != self.shape: # type: ignore[attr-defined] raise ValueError( @@ -449,7 +449,7 @@ def data(self: T_Variable, data: T_DuckArray | ArrayLike) -> None: self._data = data def astype( - self: T_Variable, + self, dtype, *, order=None, @@ -457,7 +457,7 @@ def astype( subok=None, copy=None, keep_attrs=True, - ) -> T_Variable: + ) -> Self: """ Copy of the Variable object, with data cast to a specified type. @@ -883,7 +883,7 @@ def _broadcast_indexes_vectorized(self, key): return out_dims, VectorizedIndexer(tuple(out_key)), new_order - def __getitem__(self: T_Variable, key) -> T_Variable: + def __getitem__(self, key) -> Self: """Return a new Variable object whose contents are consistent with getting the provided key from the underlying data. @@ -902,7 +902,7 @@ def __getitem__(self: T_Variable, key) -> T_Variable: data = np.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) - def _finalize_indexing_result(self: T_Variable, dims, data) -> T_Variable: + def _finalize_indexing_result(self, dims, data) -> Self: """Used by IndexVariable to return IndexVariable objects when possible.""" return self._replace(dims=dims, data=data) @@ -1001,13 +1001,13 @@ def encoding(self, value): except ValueError: raise ValueError("encoding must be castable to a dictionary") - def reset_encoding(self: T_Variable) -> T_Variable: + def reset_encoding(self) -> Self: """Return a new Variable without encoding.""" return self._replace(encoding={}) def copy( - self: T_Variable, deep: bool = True, data: T_DuckArray | ArrayLike | None = None - ) -> T_Variable: + self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None + ) -> Self: """Returns a copy of this object. 
If `deep=True`, the data array is loaded into memory and copied onto @@ -1066,11 +1066,11 @@ def copy( return self._copy(deep=deep, data=data) def _copy( - self: T_Variable, + self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None, memo: dict[int, Any] | None = None, - ) -> T_Variable: + ) -> Self: if data is None: data_old = self._data @@ -1099,12 +1099,12 @@ def _copy( return self._replace(data=ndata, attrs=attrs, encoding=encoding) def _replace( - self: T_Variable, + self, dims=_default, data=_default, attrs=_default, encoding=_default, - ) -> T_Variable: + ) -> Self: if dims is _default: dims = copy.copy(self._dims) if data is _default: @@ -1115,12 +1115,10 @@ def _replace( encoding = copy.copy(self._encoding) return type(self)(dims, data, attrs, encoding, fastpath=True) - def __copy__(self: T_Variable) -> T_Variable: + def __copy__(self) -> Self: return self._copy(deep=False) - def __deepcopy__( - self: T_Variable, memo: dict[int, Any] | None = None - ) -> T_Variable: + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) # mutable objects should not be hashable @@ -1179,7 +1177,7 @@ def chunk( chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: Any, - ) -> Variable: + ) -> Self: """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to dask @@ -1310,7 +1308,7 @@ def to_numpy(self) -> np.ndarray: return data - def as_numpy(self: T_Variable) -> T_Variable: + def as_numpy(self) -> Self: """Coerces wrapped data into a numpy array, returning a Variable.""" return self._replace(data=self.to_numpy()) @@ -1345,11 +1343,11 @@ def _to_dense(self): return self.copy(deep=False) def isel( - self: T_Variable, + self, indexers: Mapping[Any, Any] | None = None, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, - ) -> T_Variable: + ) -> Self: """Return a new array indexed along the specified dimension(s). Parameters @@ -1636,7 +1634,7 @@ def transpose( self, *dims: Hashable | ellipsis, missing_dims: ErrorOptionsWithWarn = "raise", - ) -> Variable: + ) -> Self: """Return a new Variable object with transposed dimensions. Parameters @@ -1681,7 +1679,7 @@ def transpose( return self._replace(dims=dims, data=data) @property - def T(self) -> Variable: + def T(self) -> Self: return self.transpose() def set_dims(self, dims, shape=None): @@ -1789,9 +1787,7 @@ def stack(self, dimensions=None, **dimensions_kwargs): result = result._stack_once(dims, new_dim) return result - def _unstack_once_full( - self, dims: Mapping[Any, int], old_dim: Hashable - ) -> Variable: + def _unstack_once_full(self, dims: Mapping[Any, int], old_dim: Hashable) -> Self: """ Unstacks the variable without needing an index. @@ -1824,7 +1820,9 @@ def _unstack_once_full( new_data = reordered.data.reshape(new_shape) new_dims = reordered.dims[: len(other_dims)] + new_dim_names - return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True) + return type(self)( + new_dims, new_data, self._attrs, self._encoding, fastpath=True + ) def _unstack_once( self, @@ -1832,7 +1830,7 @@ def _unstack_once( dim: Hashable, fill_value=dtypes.NA, sparse: bool = False, - ) -> Variable: + ) -> Self: """ Unstacks this variable given an index to unstack and the name of the dimension to which the index refers. 
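Note that the `_unstack_once_full` hunk above also switches `Variable(...)` to `type(self)(...)`: constructing through the runtime class is what makes the `-> Self` annotation truthful for subclasses such as `IndexVariable`. Schematically (plain Python, not xarray code)::

    class Base:
        def remake(self):
            # type(self)() builds the runtime class; Base() would always
            # return a Base and break subclass round-trips.
            return type(self)()

    class Child(Base):
        pass

    assert isinstance(Child().remake(), Child)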
@@ -2044,6 +2042,8 @@ def reduce( keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None + # We need to return `Variable` rather than the type of `self` at the moment, ref + # #8216 return Variable(dims, data, attrs=attrs) @classmethod @@ -2193,7 +2193,7 @@ def quantile( keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, - ) -> Variable: + ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 66bc69966d2..76dc4345ae7 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -4011,7 +4011,7 @@ def test_dot(self) -> None: assert_equal(expected5, actual5) with pytest.raises(NotImplementedError): - da.dot(dm3.to_dataset(name="dm")) # type: ignore + da.dot(dm3.to_dataset(name="dm")) with pytest.raises(TypeError): da.dot(dm3.values) # type: ignore From cdf07265f2256ebdc40a69eacae574e77f78fd6b Mon Sep 17 00:00:00 2001 From: Deepak Cherian Date: Fri, 22 Sep 2023 06:48:33 -0600 Subject: [PATCH 07/17] Allow creating DataArrays with nD coordinate variables (#8126) * Allow creating DataArrays with nD coordinate variables Closes #2233 Closes #8106 * more test more test# make_aggs.bash * Fix test * Apply suggestions from code review Co-authored-by: Michael Niklas * Update test --------- Co-authored-by: Michael Niklas Co-authored-by: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> --- xarray/core/dataarray.py | 9 +-------- xarray/tests/test_dataarray.py | 36 +++++++++++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 73464c07c82..724a5fc2580 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -128,7 +128,7 @@ def _check_coords_dims(shape, coords, dims): f"dimensions {dims}" ) - for d, s in zip(v.dims, v.shape): + for d, s in v.sizes.items(): if s != sizes[d]: raise ValueError( f"conflicting sizes for dimension {d!r}: " @@ -136,13 +136,6 @@ def _check_coords_dims(shape, coords, dims): f"coordinate {k!r}" ) - if k in sizes and v.shape != (sizes[k],): - raise ValueError( - f"coordinate {k!r} is a DataArray dimension, but " - f"it has shape {v.shape!r} rather than expected shape {sizes[k]!r} " - "matching the dimension size" - ) - def _infer_coords_and_dims( shape, coords, dims diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 76dc4345ae7..11ebc4da347 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -38,6 +38,7 @@ from xarray.core.indexes import Index, PandasIndex, filter_indexes_from_coords from xarray.core.types import QueryEngineOptions, QueryParserOptions from xarray.core.utils import is_scalar +from xarray.testing import _assert_internal_invariants from xarray.tests import ( InaccessibleArray, ReturnItem, @@ -415,9 +416,6 @@ def test_constructor_invalid(self) -> None: with pytest.raises(ValueError, match=r"conflicting MultiIndex"): DataArray(np.random.rand(4, 4), [("x", self.mindex), ("level_1", range(4))]) - with pytest.raises(ValueError, match=r"matching the dimension size"): - DataArray(data, coords={"x": 0}, dims=["x", "y"]) - def test_constructor_from_self_described(self) -> None: data = [[-0.1, 21], [0, 2]] expected = DataArray( @@ -7112,3 +7110,35 @@ def test_error_on_ellipsis_without_list(self) -> None: da = DataArray([[1, 2], 
[1, 2]], dims=("x", "y")) with pytest.raises(ValueError): da.stack(flat=...) # type: ignore + + +def test_nD_coord_dataarray() -> None: + # should succeed + da = DataArray( + np.ones((2, 4)), + dims=("x", "y"), + coords={ + "x": (("x", "y"), np.arange(8).reshape((2, 4))), + "y": ("y", np.arange(4)), + }, + ) + _assert_internal_invariants(da, check_default_indexes=True) + + da2 = DataArray(np.ones(4), dims=("y"), coords={"y": ("y", np.arange(4))}) + da3 = DataArray(np.ones(4), dims=("z")) + + _, actual = xr.align(da, da2) + assert_identical(da2, actual) + + expected = da.drop_vars("x") + _, actual = xr.broadcast(da, da2) + assert_identical(expected, actual) + + actual, _ = xr.broadcast(da, da3) + expected = da.expand_dims(z=4, axis=-1) + assert_identical(actual, expected) + + da4 = DataArray(np.ones((2, 4)), coords={"x": 0}, dims=["x", "y"]) + _assert_internal_invariants(da4, check_default_indexes=True) + assert "x" not in da4.xindexes + assert "x" in da4.coords From 24bf8046d5e8492abc91db78b096644726cf8d6e Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Sat, 23 Sep 2023 12:13:13 -0700 Subject: [PATCH 08/17] Remove an import fallback (#8228) --- pyproject.toml | 1 - xarray/__init__.py | 8 ++------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index cb51c6ea741..25263928b20 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,6 @@ module = [ "fsspec.*", "h5netcdf.*", "h5py.*", - "importlib_metadata.*", "iris.*", "matplotlib.*", "mpl_toolkits.*", diff --git a/xarray/__init__.py b/xarray/__init__.py index b63b0d81470..1fd3b0c4336 100644 --- a/xarray/__init__.py +++ b/xarray/__init__.py @@ -1,3 +1,5 @@ +from importlib.metadata import version as _version + from xarray import testing, tutorial from xarray.backends.api import ( load_dataarray, @@ -41,12 +43,6 @@ from xarray.core.variable import IndexVariable, Variable, as_variable from xarray.util.print_versions import show_versions -try: - from importlib.metadata import version as _version -except ImportError: - # if the fallback library is missing, we are doomed. 
- from importlib_metadata import version as _version - try: __version__ = _version("xarray") except Exception: From b14fbd9394a6195680150327d3c10fcb176bbc5f Mon Sep 17 00:00:00 2001 From: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> Date: Sat, 23 Sep 2023 12:38:05 -0700 Subject: [PATCH 09/17] Add a `Literal` typing (#8227) * Add a `Literal` typing --- xarray/core/computation.py | 2 +- xarray/tests/test_computation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 971f036b394..bae779af652 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -893,7 +893,7 @@ def apply_ufunc( dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool | str | None = None, kwargs: Mapping | None = None, - dask: str = "forbidden", + dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden", output_dtypes: Sequence | None = None, output_sizes: Mapping[Any, int] | None = None, meta: Any = None, diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py index b75e80db2da..87f8328e441 100644 --- a/xarray/tests/test_computation.py +++ b/xarray/tests/test_computation.py @@ -1190,7 +1190,7 @@ def test_apply_dask() -> None: # unknown setting for dask array handling with pytest.raises(ValueError): - apply_ufunc(identity, array, dask="unknown") + apply_ufunc(identity, array, dask="unknown") # type: ignore def dask_safe_identity(x): return apply_ufunc(identity, x, dask="allowed") From 77eaa8be439a61ae07939035d07d6890b74d53e8 Mon Sep 17 00:00:00 2001 From: Illviljan <14371165+Illviljan@users.noreply.github.com> Date: Sun, 24 Sep 2023 16:03:55 +0200 Subject: [PATCH 10/17] Add typing to functions related to data_vars (#8226) * Update dataset.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more typing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xarray/core/dataset.py Co-authored-by: Michael Niklas * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Michael Niklas --- xarray/core/coordinates.py | 4 ++-- xarray/core/dataset.py | 13 ++++++------- xarray/core/types.py | 3 +++ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 97ba383ebde..0c85b2a2d69 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -23,7 +23,7 @@ create_default_index_implicit, ) from xarray.core.merge import merge_coordinates_without_align, merge_coords -from xarray.core.types import Self, T_DataArray, T_Xarray +from xarray.core.types import DataVars, Self, T_DataArray, T_Xarray from xarray.core.utils import ( Frozen, ReprObject, @@ -937,7 +937,7 @@ def assert_coordinate_consistent(obj: T_Xarray, coords: Mapping[Any, Variable]) def create_coords_with_default_indexes( - coords: Mapping[Any, Any], data_vars: Mapping[Any, Any] | None = None + coords: Mapping[Any, Any], data_vars: DataVars | None = None ) -> Coordinates: """Returns a Coordinates object from a mapping of coordinates (arbitrary objects). 
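What the `Literal` annotation on `apply_ufunc`'s `dask` argument buys (patch 09 above): invalid strings are now rejected statically, which is also why the test that deliberately passes "unknown" gains a `# type: ignore`. A generic sketch of the pattern::

    from typing import Literal

    DaskOption = Literal["forbidden", "allowed", "parallelized"]

    def run(dask: DaskOption = "forbidden") -> None:
        ...

    run("parallelized")  # accepted
    run("unknown")       # flagged by mypy at the call site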
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 9d771f0390c..44016e87306 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -124,7 +124,7 @@ from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.core.dataarray import DataArray from xarray.core.groupby import DatasetGroupBy - from xarray.core.merge import CoercibleMapping, CoercibleValue + from xarray.core.merge import CoercibleMapping, CoercibleValue, _MergeResult from xarray.core.parallelcompat import ChunkManagerEntrypoint from xarray.core.resample import DatasetResample from xarray.core.rolling import DatasetCoarsen, DatasetRolling @@ -133,6 +133,7 @@ CoarsenBoundaryOptions, CombineAttrsOptions, CompatOptions, + DataVars, DatetimeLike, DatetimeUnitOptions, Dims, @@ -404,7 +405,7 @@ def _initialize_feasible(lb, ub): return param_defaults, bounds_defaults -def merge_data_and_coords(data_vars, coords): +def merge_data_and_coords(data_vars: DataVars, coords) -> _MergeResult: """Used in Dataset.__init__.""" if isinstance(coords, Coordinates): coords = coords.copy() @@ -666,7 +667,7 @@ def __init__( self, # could make a VariableArgs to use more generally, and refine these # categories - data_vars: Mapping[Any, Any] | None = None, + data_vars: DataVars | None = None, coords: Mapping[Any, Any] | None = None, attrs: Mapping[Any, Any] | None = None, ) -> None: @@ -1220,9 +1221,7 @@ def _overwrite_indexes( else: return replaced - def copy( - self, deep: bool = False, data: Mapping[Any, ArrayLike] | None = None - ) -> Self: + def copy(self, deep: bool = False, data: DataVars | None = None) -> Self: """Returns a copy of this dataset. If `deep=True`, a deep copy is made of each of the component variables. @@ -1324,7 +1323,7 @@ def copy( def _copy( self, deep: bool = False, - data: Mapping[Any, ArrayLike] | None = None, + data: DataVars | None = None, memo: dict[int, Any] | None = None, ) -> Self: if data is None: diff --git a/xarray/core/types.py b/xarray/core/types.py index e9e700b038e..6b6f9300631 100644 --- a/xarray/core/types.py +++ b/xarray/core/types.py @@ -187,6 +187,9 @@ def copy( T_Chunks = Union[int, dict[Any, Any], Literal["auto"], None] T_NormalizedChunks = tuple[tuple[int, ...], ...] 
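The DataVars alias defined just below is deliberately loose (Mapping[Any, Any]): anything mapping names to variable-like values qualifies. A small usage sketch (values illustrative):

    import numpy as np
    import xarray as xr

    # Dataset(), Dataset.copy() and merge_data_and_coords() now annotate
    # this argument as DataVars
    ds = xr.Dataset(data_vars={"t": ("x", np.arange(3))})
    ds_zeroed = ds.copy(data={"t": np.zeros(3)})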
+DataVars = Mapping[Any, Any] + + ErrorOptions = Literal["raise", "ignore"] ErrorOptionsWithWarn = Literal["raise", "warn", "ignore"] From a4f80b23d32e9c3986e3342182fe382d8081c3c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kai=20M=C3=BChlbauer?= Date: Sun, 24 Sep 2023 17:05:25 +0200 Subject: [PATCH 11/17] override `units` for datetime64/timedelta64 variables to preserve integer dtype (#8201) * remove `dtype` from encoding for datetime64/timedelta64 variables to prevent unnecessary casts * adapt tests * add whats-new.rst entry * Update xarray/coding/times.py Co-authored-by: Spencer Clark * Update doc/whats-new.rst Co-authored-by: Spencer Clark * add test per review suggestion, replace .kind-check with np.issubdtype-check * align timedelta64 check with datetime64 check * override units instead of dtype * remove print statement * warn in case of serialization to floating point, too * align if-else * Add instructions to warnings * Fix test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use warnings.catch_warnings * Update doc/whats-new.rst Co-authored-by: Spencer Clark --------- Co-authored-by: Spencer Clark Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- doc/whats-new.rst | 2 + xarray/coding/times.py | 110 +++++++++++++++++++++--------- xarray/tests/test_coding_times.py | 65 +++++++++++++++--- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 67429ed7e18..5f18e999cc0 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -90,6 +90,8 @@ Bug fixes - ``.rolling_exp`` functions no longer mistakenly lose non-dimensioned coords (:issue:`6528`, :pull:`8114`) By `Maximilian Roos `_. +- In the event that user-provided datetime64/timedelta64 units and integer dtype encoding parameters conflict with each other, override the units to preserve an integer dtype for most faithful serialization to disk (:issue:`1064`, :pull:`8201`). + By `Kai Mühlbauer `_. Documentation ~~~~~~~~~~~~~ diff --git a/xarray/coding/times.py b/xarray/coding/times.py index 79efbecfb7c..2822f02dd8d 100644 --- a/xarray/coding/times.py +++ b/xarray/coding/times.py @@ -656,8 +656,22 @@ def cast_to_int_if_safe(num) -> np.ndarray: return num +def _division(deltas, delta, floor): + if floor: + # calculate int64 floor division + # to preserve integer dtype if possible (GH 4045, GH7817). + num = deltas // delta.astype(np.int64) + num = num.astype(np.int64, copy=False) + else: + num = deltas / delta + return num + + def encode_cf_datetime( - dates, units: str | None = None, calendar: str | None = None + dates, + units: str | None = None, + calendar: str | None = None, + dtype: np.dtype | None = None, ) -> tuple[np.ndarray, str, str]: """Given an array of datetime objects, returns the tuple `(num, units, calendar)` suitable for a CF compliant time variable. @@ -689,6 +703,12 @@ def encode_cf_datetime( time_units, ref_date = _unpack_time_units_and_ref_date(units) time_delta = _time_units_to_timedelta64(time_units) + # Wrap the dates in a DatetimeIndex to do the subtraction to ensure + # an OverflowError is raised if the ref_date is too far away from + # dates to be encoded (GH 2272). 
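To make the new units override concrete, a hedged sketch of the user-visible behavior (values illustrative, mirroring the tests further below):

    import numpy as np
    from xarray import Variable, conventions

    times = np.array(
        ["2000-01-01T00:00", "2000-01-01T06:00"], dtype="datetime64[ns]"
    )
    var = Variable(
        ["time"], times, encoding={"units": "days since 2000-01-01", "dtype": "int64"}
    )

    # "days" cannot represent the 6-hour step as integers, so instead of
    # silently serializing to float64, encoding now warns and rewrites the
    # units to "hours since 2000-01-01", keeping the requested int64 dtype.
    encoded = conventions.encode_cf_variable(var)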
+ dates_as_index = pd.DatetimeIndex(dates.ravel()) + time_deltas = dates_as_index - ref_date + # retrieve needed units to faithfully encode to int64 needed_units, data_ref_date = _unpack_time_units_and_ref_date(data_units) if data_units != units: @@ -697,26 +717,32 @@ if ref_delta > np.timedelta64(0, "ns"): needed_units = _infer_time_units_from_diff(ref_delta) - # Wrap the dates in a DatetimeIndex to do the subtraction to ensure - # an OverflowError is raised if the ref_date is too far away from - # dates to be encoded (GH 2272). - dates_as_index = pd.DatetimeIndex(dates.ravel()) - time_deltas = dates_as_index - ref_date - # needed time delta to encode faithfully to int64 needed_time_delta = _time_units_to_timedelta64(needed_units) - if time_delta <= needed_time_delta: - # calculate int64 floor division - # to preserve integer dtype if possible (GH 4045, GH7817). - num = time_deltas // time_delta.astype(np.int64) - num = num.astype(np.int64, copy=False) - else: - emit_user_level_warning( - f"Times can't be serialized faithfully with requested units {units!r}. " - f"Resolution of {needed_units!r} needed. " - f"Serializing timeseries to floating point." - ) - num = time_deltas / time_delta + + floor_division = True + if time_delta > needed_time_delta: + floor_division = False + if dtype is None: + emit_user_level_warning( + f"Times can't be serialized faithfully to int64 with requested units {units!r}. " + f"Resolution of {needed_units!r} needed. Serializing times to floating point instead. " + f"Set encoding['dtype'] to integer dtype to serialize to int64. " + f"Set encoding['dtype'] to floating point dtype to silence this warning." + ) + elif np.issubdtype(dtype, np.integer): + new_units = f"{needed_units} since {format_timestamp(ref_date)}" + emit_user_level_warning( + f"Times can't be serialized faithfully to int64 with requested units {units!r}. " + f"Serializing with units {new_units!r} instead. " + f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " + f"Set encoding['units'] to {new_units!r} to silence this warning." + ) + units = new_units + time_delta = needed_time_delta + floor_division = True + + num = _division(time_deltas, time_delta, floor_division) num = num.values.reshape(dates.shape) except (OutOfBoundsDatetime, OverflowError, ValueError): @@ -728,7 +754,9 @@ return (num, units, calendar) -def encode_cf_timedelta(timedeltas, units: str | None = None) -> tuple[np.ndarray, str]: +def encode_cf_timedelta( + timedeltas, units: str | None = None, dtype: np.dtype | None = None +) -> tuple[np.ndarray, str]: data_units = infer_timedelta_units(timedeltas) if units is None: @@ -744,18 +772,29 @@ # needed time delta to encode faithfully to int64 needed_time_delta = _time_units_to_timedelta64(needed_units) - if time_delta <= needed_time_delta: - # calculate int64 floor division - # to preserve integer dtype if possible - num = time_deltas // time_delta.astype(np.int64) - num = num.astype(np.int64, copy=False) - else: - emit_user_level_warning( - f"Timedeltas can't be serialized faithfully with requested units {units!r}. " - f"Resolution of {needed_units!r} needed. " - f"Serializing timedeltas to floating point."
- ) - num = time_deltas / time_delta + + floor_division = True + if time_delta > needed_time_delta: + floor_division = False + if dtype is None: + emit_user_level_warning( + f"Timedeltas can't be serialized faithfully to int64 with requested units {units!r}. " + f"Resolution of {needed_units!r} needed. Serializing timedeltas to floating point instead. " + f"Set encoding['dtype'] to integer dtype to serialize to int64. " + f"Set encoding['dtype'] to floating point dtype to silence this warning." + ) + elif np.issubdtype(dtype, np.integer): + emit_user_level_warning( + f"Timedeltas can't be serialized faithfully with requested units {units!r}. " + f"Serializing with units {needed_units!r} instead. " + f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " + f"Set encoding['units'] to {needed_units!r} to silence this warning." + ) + units = needed_units + time_delta = needed_time_delta + floor_division = True + + num = _division(time_deltas, time_delta, floor_division) num = num.values.reshape(timedeltas.shape) return (num, units) @@ -772,7 +811,8 @@ def encode(self, variable: Variable, name: T_Name = None) -> Variable: units = encoding.pop("units", None) calendar = encoding.pop("calendar", None) - (data, units, calendar) = encode_cf_datetime(data, units, calendar) + dtype = encoding.get("dtype", None) + (data, units, calendar) = encode_cf_datetime(data, units, calendar, dtype) safe_setitem(attrs, "units", units, name=name) safe_setitem(attrs, "calendar", calendar, name=name) @@ -807,7 +847,9 @@ def encode(self, variable: Variable, name: T_Name = None) -> Variable: if np.issubdtype(variable.data.dtype, np.timedelta64): dims, data, attrs, encoding = unpack_for_encoding(variable) - data, units = encode_cf_timedelta(data, encoding.pop("units", None)) + data, units = encode_cf_timedelta( + data, encoding.pop("units", None), encoding.get("dtype", None) + ) safe_setitem(attrs, "units", units, name=name) return Variable(dims, data, attrs, encoding, fastpath=True) diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py index 079e432b565..5f76a4a2ca8 100644 --- a/xarray/tests/test_coding_times.py +++ b/xarray/tests/test_coding_times.py @@ -30,7 +30,7 @@ from xarray.coding.variables import SerializationWarning from xarray.conventions import _update_bounds_attributes, cf_encoder from xarray.core.common import contains_cftime_datetimes -from xarray.testing import assert_allclose, assert_equal, assert_identical +from xarray.testing import assert_equal, assert_identical from xarray.tests import ( FirstElementAccessibleArray, arm_xfail, @@ -1036,7 +1036,7 @@ def test_encode_cf_datetime_defaults_to_correct_dtype( pytest.skip("Nanosecond frequency is not valid for cftime dates.") times = date_range("2000", periods=3, freq=freq) units = f"{encoding_units} since 2000-01-01" - encoded, _, _ = coding.times.encode_cf_datetime(times, units) + encoded, _units, _ = coding.times.encode_cf_datetime(times, units) numpy_timeunit = coding.times._netcdf_to_numpy_timeunit(encoding_units) encoding_units_as_timedelta = np.timedelta64(1, numpy_timeunit) @@ -1212,6 +1212,7 @@ def test_contains_cftime_lazy() -> None: ("1677-09-21T00:12:43.145224193", "ns", np.int64, None, False), ("1677-09-21T00:12:43.145225", "us", np.int64, None, False), ("1970-01-01T00:00:01.000001", "us", np.int64, None, False), + ("1677-09-21T00:21:52.901038080", "ns", np.float32, 20.0, True), ], ) def test_roundtrip_datetime64_nanosecond_precision( @@ -1261,14 +1262,52 @@ def
test_roundtrip_datetime64_nanosecond_precision_warning() -> None: ] units = "days since 1970-01-10T01:01:00" needed_units = "hours" - encoding = dict(_FillValue=20, units=units) + new_units = f"{needed_units} since 1970-01-10T01:01:00" + + encoding = dict(dtype=None, _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) - wmsg = ( - f"Times can't be serialized faithfully with requested units {units!r}. " - f"Resolution of {needed_units!r} needed. " - ) - with pytest.warns(UserWarning, match=wmsg): + with pytest.warns(UserWarning, match=f"Resolution of {needed_units!r} needed."): + encoded_var = conventions.encode_cf_variable(var) + assert encoded_var.dtype == np.float64 + assert encoded_var.attrs["units"] == units + assert encoded_var.attrs["_FillValue"] == 20.0 + + decoded_var = conventions.decode_cf_variable("foo", encoded_var) + assert_identical(var, decoded_var) + + encoding = dict(dtype="int64", _FillValue=20, units=units) + var = Variable(["time"], times, encoding=encoding) + with pytest.warns( + UserWarning, match=f"Serializing with units {new_units!r} instead." + ): + encoded_var = conventions.encode_cf_variable(var) + assert encoded_var.dtype == np.int64 + assert encoded_var.attrs["units"] == new_units + assert encoded_var.attrs["_FillValue"] == 20 + + decoded_var = conventions.decode_cf_variable("foo", encoded_var) + assert_identical(var, decoded_var) + + encoding = dict(dtype="float64", _FillValue=20, units=units) + var = Variable(["time"], times, encoding=encoding) + with warnings.catch_warnings(): + warnings.simplefilter("error") + encoded_var = conventions.encode_cf_variable(var) + assert encoded_var.dtype == np.float64 + assert encoded_var.attrs["units"] == units + assert encoded_var.attrs["_FillValue"] == 20.0 + + decoded_var = conventions.decode_cf_variable("foo", encoded_var) + assert_identical(var, decoded_var) + + encoding = dict(dtype="int64", _FillValue=20, units=new_units) + var = Variable(["time"], times, encoding=encoding) + with warnings.catch_warnings(): + warnings.simplefilter("error") encoded_var = conventions.encode_cf_variable(var) + assert encoded_var.dtype == np.int64 + assert encoded_var.attrs["units"] == new_units + assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable("foo", encoded_var) assert_identical(var, decoded_var) @@ -1309,14 +1348,18 @@ def test_roundtrip_timedelta64_nanosecond_precision_warning() -> None: needed_units = "hours" wmsg = ( f"Timedeltas can't be serialized faithfully with requested units {units!r}. " - f"Resolution of {needed_units!r} needed. " + f"Serializing with units {needed_units!r} instead." 
) - encoding = dict(_FillValue=20, units=units) + encoding = dict(dtype=np.int64, _FillValue=20, units=units) var = Variable(["time"], timedelta_values, encoding=encoding) with pytest.warns(UserWarning, match=wmsg): encoded_var = conventions.encode_cf_variable(var) + assert encoded_var.dtype == np.int64 + assert encoded_var.attrs["units"] == needed_units + assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable("foo", encoded_var) - assert_allclose(var, decoded_var) + assert_identical(var, decoded_var) + assert decoded_var.encoding["dtype"] == np.int64 def test_roundtrip_float_times() -> None: From 05b3a211d5b93acd45be19eab1d0a6e9c72cff8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kai=20M=C3=BChlbauer?= Date: Sun, 24 Sep 2023 17:28:30 +0200 Subject: [PATCH 12/17] test_interpolate_pd_compat with a range of fill_values (#8189) * ENH: test_interpolate_pd_compat with a range of fill_values * add whats-new.rst entry --- doc/whats-new.rst | 3 +++ xarray/tests/test_missing.py | 28 +++++++++++++++++--------- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 5f18e999cc0..9a21bcb7ab9 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -108,6 +108,9 @@ Internal Changes than `.reduce`, as the start of a broader effort to move non-reducing functions away from ```.reduce``, (:pull:`8114`). By `Maximilian Roos `_. +- Test a range of fill_values in test_interpolate_pd_compat (:issue:`8146`, :pull:`8189`). + By `Kai Mühlbauer `_. + .. _whats-new.2023.08.0: diff --git a/xarray/tests/test_missing.py b/xarray/tests/test_missing.py index fe2cdc58807..c57d84c927d 100644 --- a/xarray/tests/test_missing.py +++ b/xarray/tests/test_missing.py @@ -92,26 +92,36 @@ def make_interpolate_example_data(shape, frac_nan, seed=12345, non_uniform=False return da, df +@pytest.mark.parametrize("fill_value", [None, np.nan, 47.11]) +@pytest.mark.parametrize( + "method", ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"] +) @requires_scipy -def test_interpolate_pd_compat(): +def test_interpolate_pd_compat(method, fill_value) -> None: shapes = [(8, 8), (1, 20), (20, 1), (100, 100)] frac_nans = [0, 0.5, 1] - methods = ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"] - for shape, frac_nan, method in itertools.product(shapes, frac_nans, methods): + for shape, frac_nan in itertools.product(shapes, frac_nans): da, df = make_interpolate_example_data(shape, frac_nan) for dim in ["time", "x"]: - actual = da.interpolate_na(method=method, dim=dim, fill_value=np.nan) + actual = da.interpolate_na(method=method, dim=dim, fill_value=fill_value) + # need limit_direction="both" here, to let pandas fill + # in both directions instead of default forward direction only expected = df.interpolate( method=method, axis=da.get_axis_num(dim), + limit_direction="both", + fill_value=fill_value, ) - # Note, Pandas does some odd things with the left/right fill_value - # for the linear methods. This next line inforces the xarray - # fill_value convention on the pandas output. Therefore, this test - # only checks that interpolated values are the same (not nans) - expected.values[pd.isnull(actual.values)] = np.nan + + if method == "linear": + # Note, Pandas does not take left/right fill_value into account + # for the numpy linear methods.
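+ # (e.g. with fill_value=47.11, pandas leaves the leading/trailing
+ # NaNs in place while xarray fills them at the boundaries with
+ # fill_value, hence the extra masking below)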
+ # see https://github.com/pandas-dev/pandas/issues/55144 + # This aligns the pandas output with the xarray output + expected.values[pd.isnull(actual.values)] = np.nan + expected.values[actual.values == fill_value] = fill_value np.testing.assert_allclose(actual.values, expected.values) From 565b23b95beda893e0d66d1e2c6da49984bb0925 Mon Sep 17 00:00:00 2001 From: Michael Niklas Date: Mon, 25 Sep 2023 06:43:54 +0200 Subject: [PATCH 13/17] Rewrite typed_ops (#8204) * rewrite typed_ops * improved typing of rolling instance attrs * add typed_ops xr.Variable tests * add typed_ops test * add minor typehint * adjust to numpy 1.24 * add groupby ops type tests * remove wrong types from ops * fix Dataset not being part of SupportsArray Protocol * ignore mypy align complaint * add reasons for type ignores in test * add overloads for variable typed ops * move tests to their own module * add entry to whats-new --- doc/whats-new.rst | 3 + xarray/core/_typed_ops.py | 591 ++++++++++++++++--------- xarray/core/_typed_ops.pyi | 782 --------------------------------- xarray/core/dataarray.py | 15 +- xarray/core/dataset.py | 21 +- xarray/core/rolling.py | 28 +- xarray/core/types.py | 7 +- xarray/core/weighted.py | 1 + xarray/tests/test_groupby.py | 4 +- xarray/tests/test_typed_ops.py | 246 +++++++++++ xarray/util/generate_ops.py | 286 ++++++------ 11 files changed, 827 insertions(+), 1157 deletions(-) delete mode 100644 xarray/core/_typed_ops.pyi create mode 100644 xarray/tests/test_typed_ops.py diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 9a21bcb7ab9..4307c2829ca 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -92,6 +92,9 @@ Bug fixes By `Maximilian Roos `_. - In the event that user-provided datetime64/timedelta64 units and integer dtype encoding parameters conflict with each other, override the units to preserve an integer dtype for most faithful serialization to disk (:issue:`1064`, :pull:`8201`). By `Kai Mühlbauer `_. +- Static typing of dunder ops methods (like :py:meth:`DataArray.__eq__`) has been fixed. + Remaining issues are upstream problems (:issue:`7780`, :pull:`8204`). + By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ diff --git a/xarray/core/_typed_ops.py b/xarray/core/_typed_ops.py index d3a783be45d..330d13bb217 100644 --- a/xarray/core/_typed_ops.py +++ b/xarray/core/_typed_ops.py @@ -1,165 +1,182 @@ """Mixin classes with arithmetic operators.""" # This file was generated using xarray.util.generate_ops. Do not edit manually.
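Before the wall of regenerated methods, a brief sketch of what the rewrite buys at type-check time (illustrative; the inferred types are what mypy reports, not runtime checks):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(3), dims="x")
    ds = xr.Dataset({"a": da})

    mask = da == 1   # inferred as DataArray (not bool), usable with .where()
    total = ds + da  # inferred as Dataset
    bumped = da + 1  # inferred as DataArray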
+from __future__ import annotations + import operator +from typing import TYPE_CHECKING, Any, Callable, NoReturn, overload from xarray.core import nputils, ops +from xarray.core.types import ( + DaCompatible, + DsCompatible, + GroupByCompatible, + Self, + T_DataArray, + T_Xarray, + VarCompatible, +) + +if TYPE_CHECKING: + from xarray.core.dataset import Dataset class DatasetOpsMixin: __slots__ = () - def _binary_op(self, other, f, reflexive=False): + def _binary_op( + self, other: DsCompatible, f: Callable, reflexive: bool = False + ) -> Self: raise NotImplementedError - def __add__(self, other): + def __add__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.add) - def __sub__(self, other): + def __sub__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.sub) - def __mul__(self, other): + def __mul__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mul) - def __pow__(self, other): + def __pow__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.pow) - def __truediv__(self, other): + def __truediv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.truediv) - def __floordiv__(self, other): + def __floordiv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.floordiv) - def __mod__(self, other): + def __mod__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mod) - def __and__(self, other): + def __and__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.and_) - def __xor__(self, other): + def __xor__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.xor) - def __or__(self, other): + def __or__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.or_) - def __lshift__(self, other): + def __lshift__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.lshift) - def __rshift__(self, other): + def __rshift__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.rshift) - def __lt__(self, other): + def __lt__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.lt) - def __le__(self, other): + def __le__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.le) - def __gt__(self, other): + def __gt__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.gt) - def __ge__(self, other): + def __ge__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.ge) - def __eq__(self, other): + def __eq__(self, other: DsCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_eq) - def __ne__(self, other): + def __ne__(self, other: DsCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_ne) - def __radd__(self, other): + def __radd__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) - def __rsub__(self, other): + def __rsub__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) - def __rmul__(self, other): + def __rmul__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) - def __rpow__(self, other): + def __rpow__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) - def __rtruediv__(self, other): + def __rtruediv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.truediv, 
reflexive=True) - def __rfloordiv__(self, other): + def __rfloordiv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) - def __rmod__(self, other): + def __rmod__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) - def __rand__(self, other): + def __rand__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) - def __rxor__(self, other): + def __rxor__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) - def __ror__(self, other): + def __ror__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) - def _inplace_binary_op(self, other, f): + def _inplace_binary_op(self, other: DsCompatible, f: Callable) -> Self: raise NotImplementedError - def __iadd__(self, other): + def __iadd__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.iadd) - def __isub__(self, other): + def __isub__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.isub) - def __imul__(self, other): + def __imul__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.imul) - def __ipow__(self, other): + def __ipow__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.ipow) - def __itruediv__(self, other): + def __itruediv__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.itruediv) - def __ifloordiv__(self, other): + def __ifloordiv__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.ifloordiv) - def __imod__(self, other): + def __imod__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.imod) - def __iand__(self, other): + def __iand__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.iand) - def __ixor__(self, other): + def __ixor__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.ixor) - def __ior__(self, other): + def __ior__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.ior) - def __ilshift__(self, other): + def __ilshift__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.ilshift) - def __irshift__(self, other): + def __irshift__(self, other: DsCompatible) -> Self: return self._inplace_binary_op(other, operator.irshift) - def _unary_op(self, f, *args, **kwargs): + def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError - def __neg__(self): + def __neg__(self) -> Self: return self._unary_op(operator.neg) - def __pos__(self): + def __pos__(self) -> Self: return self._unary_op(operator.pos) - def __abs__(self): + def __abs__(self) -> Self: return self._unary_op(operator.abs) - def __invert__(self): + def __invert__(self) -> Self: return self._unary_op(operator.invert) - def round(self, *args, **kwargs): + def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) - def argsort(self, *args, **kwargs): + def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) - def conj(self, *args, **kwargs): + def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) - def conjugate(self, *args, **kwargs): + def conjugate(self, *args: Any, **kwargs: Any) -> Self: return 
self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ @@ -215,157 +232,159 @@ def conjugate(self, *args, **kwargs): class DataArrayOpsMixin: __slots__ = () - def _binary_op(self, other, f, reflexive=False): + def _binary_op( + self, other: DaCompatible, f: Callable, reflexive: bool = False + ) -> Self: raise NotImplementedError - def __add__(self, other): + def __add__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.add) - def __sub__(self, other): + def __sub__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.sub) - def __mul__(self, other): + def __mul__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mul) - def __pow__(self, other): + def __pow__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.pow) - def __truediv__(self, other): + def __truediv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.truediv) - def __floordiv__(self, other): + def __floordiv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.floordiv) - def __mod__(self, other): + def __mod__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mod) - def __and__(self, other): + def __and__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.and_) - def __xor__(self, other): + def __xor__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.xor) - def __or__(self, other): + def __or__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.or_) - def __lshift__(self, other): + def __lshift__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.lshift) - def __rshift__(self, other): + def __rshift__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.rshift) - def __lt__(self, other): + def __lt__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.lt) - def __le__(self, other): + def __le__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.le) - def __gt__(self, other): + def __gt__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.gt) - def __ge__(self, other): + def __ge__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.ge) - def __eq__(self, other): + def __eq__(self, other: DaCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_eq) - def __ne__(self, other): + def __ne__(self, other: DaCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_ne) - def __radd__(self, other): + def __radd__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) - def __rsub__(self, other): + def __rsub__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) - def __rmul__(self, other): + def __rmul__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) - def __rpow__(self, other): + def __rpow__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) - def __rtruediv__(self, other): + def __rtruediv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) - def __rfloordiv__(self, other): + def __rfloordiv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) - def __rmod__(self, other): + def 
__rmod__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) - def __rand__(self, other): + def __rand__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) - def __rxor__(self, other): + def __rxor__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) - def __ror__(self, other): + def __ror__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) - def _inplace_binary_op(self, other, f): + def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: raise NotImplementedError - def __iadd__(self, other): + def __iadd__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.iadd) - def __isub__(self, other): + def __isub__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.isub) - def __imul__(self, other): + def __imul__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.imul) - def __ipow__(self, other): + def __ipow__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.ipow) - def __itruediv__(self, other): + def __itruediv__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.itruediv) - def __ifloordiv__(self, other): + def __ifloordiv__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.ifloordiv) - def __imod__(self, other): + def __imod__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.imod) - def __iand__(self, other): + def __iand__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.iand) - def __ixor__(self, other): + def __ixor__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.ixor) - def __ior__(self, other): + def __ior__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.ior) - def __ilshift__(self, other): + def __ilshift__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.ilshift) - def __irshift__(self, other): + def __irshift__(self, other: DaCompatible) -> Self: return self._inplace_binary_op(other, operator.irshift) - def _unary_op(self, f, *args, **kwargs): + def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError - def __neg__(self): + def __neg__(self) -> Self: return self._unary_op(operator.neg) - def __pos__(self): + def __pos__(self) -> Self: return self._unary_op(operator.pos) - def __abs__(self): + def __abs__(self) -> Self: return self._unary_op(operator.abs) - def __invert__(self): + def __invert__(self) -> Self: return self._unary_op(operator.invert) - def round(self, *args, **kwargs): + def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) - def argsort(self, *args, **kwargs): + def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) - def conj(self, *args, **kwargs): + def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) - def conjugate(self, *args, **kwargs): + def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ @@ -421,157 +440,303 @@ def conjugate(self, *args, **kwargs): class VariableOpsMixin: __slots__ = () - def _binary_op(self, other, f, 
reflexive=False): + def _binary_op( + self, other: VarCompatible, f: Callable, reflexive: bool = False + ) -> Self: raise NotImplementedError - def __add__(self, other): + @overload + def __add__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __add__(self, other: VarCompatible) -> Self: + ... + + def __add__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.add) - def __sub__(self, other): + @overload + def __sub__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __sub__(self, other: VarCompatible) -> Self: + ... + + def __sub__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.sub) - def __mul__(self, other): + @overload + def __mul__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __mul__(self, other: VarCompatible) -> Self: + ... + + def __mul__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mul) - def __pow__(self, other): + @overload + def __pow__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __pow__(self, other: VarCompatible) -> Self: + ... + + def __pow__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.pow) - def __truediv__(self, other): + @overload + def __truediv__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __truediv__(self, other: VarCompatible) -> Self: + ... + + def __truediv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.truediv) - def __floordiv__(self, other): + @overload + def __floordiv__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __floordiv__(self, other: VarCompatible) -> Self: + ... + + def __floordiv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.floordiv) - def __mod__(self, other): + @overload + def __mod__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __mod__(self, other: VarCompatible) -> Self: + ... + + def __mod__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mod) - def __and__(self, other): + @overload + def __and__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __and__(self, other: VarCompatible) -> Self: + ... + + def __and__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.and_) - def __xor__(self, other): + @overload + def __xor__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __xor__(self, other: VarCompatible) -> Self: + ... + + def __xor__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.xor) - def __or__(self, other): + @overload + def __or__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __or__(self, other: VarCompatible) -> Self: + ... + + def __or__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.or_) - def __lshift__(self, other): + @overload + def __lshift__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __lshift__(self, other: VarCompatible) -> Self: + ... + + def __lshift__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.lshift) - def __rshift__(self, other): + @overload + def __rshift__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __rshift__(self, other: VarCompatible) -> Self: + ... + + def __rshift__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.rshift) - def __lt__(self, other): + @overload + def __lt__(self, other: T_DataArray) -> NoReturn: + ... 
+ + @overload + def __lt__(self, other: VarCompatible) -> Self: + ... + + def __lt__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.lt) - def __le__(self, other): + @overload + def __le__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __le__(self, other: VarCompatible) -> Self: + ... + + def __le__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.le) - def __gt__(self, other): + @overload + def __gt__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __gt__(self, other: VarCompatible) -> Self: + ... + + def __gt__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.gt) - def __ge__(self, other): + @overload + def __ge__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __ge__(self, other: VarCompatible) -> Self: + ... + + def __ge__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.ge) - def __eq__(self, other): + @overload # type:ignore[override] + def __eq__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __eq__(self, other: VarCompatible) -> Self: + ... + + def __eq__(self, other: VarCompatible) -> Self: return self._binary_op(other, nputils.array_eq) - def __ne__(self, other): + @overload # type:ignore[override] + def __ne__(self, other: T_DataArray) -> NoReturn: + ... + + @overload + def __ne__(self, other: VarCompatible) -> Self: + ... + + def __ne__(self, other: VarCompatible) -> Self: return self._binary_op(other, nputils.array_ne) - def __radd__(self, other): + def __radd__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) - def __rsub__(self, other): + def __rsub__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) - def __rmul__(self, other): + def __rmul__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) - def __rpow__(self, other): + def __rpow__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) - def __rtruediv__(self, other): + def __rtruediv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) - def __rfloordiv__(self, other): + def __rfloordiv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) - def __rmod__(self, other): + def __rmod__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) - def __rand__(self, other): + def __rand__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) - def __rxor__(self, other): + def __rxor__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) - def __ror__(self, other): + def __ror__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) - def _inplace_binary_op(self, other, f): + def _inplace_binary_op(self, other: VarCompatible, f: Callable) -> Self: raise NotImplementedError - def __iadd__(self, other): + def __iadd__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) - def __isub__(self, other): + def __isub__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) - def __imul__(self, other): + def __imul__(self, other: VarCompatible) -> Self: # type:ignore[misc] return 
self._inplace_binary_op(other, operator.imul) - def __ipow__(self, other): + def __ipow__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) - def __itruediv__(self, other): + def __itruediv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) - def __ifloordiv__(self, other): + def __ifloordiv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) - def __imod__(self, other): + def __imod__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) - def __iand__(self, other): + def __iand__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) - def __ixor__(self, other): + def __ixor__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) - def __ior__(self, other): + def __ior__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) - def __ilshift__(self, other): + def __ilshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ilshift) - def __irshift__(self, other): + def __irshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) - def _unary_op(self, f, *args, **kwargs): + def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError - def __neg__(self): + def __neg__(self) -> Self: return self._unary_op(operator.neg) - def __pos__(self): + def __pos__(self) -> Self: return self._unary_op(operator.pos) - def __abs__(self): + def __abs__(self) -> Self: return self._unary_op(operator.abs) - def __invert__(self): + def __invert__(self) -> Self: return self._unary_op(operator.invert) - def round(self, *args, **kwargs): + def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) - def argsort(self, *args, **kwargs): + def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) - def conj(self, *args, **kwargs): + def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) - def conjugate(self, *args, **kwargs): + def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ @@ -627,91 +792,93 @@ def conjugate(self, *args, **kwargs): class DatasetGroupByOpsMixin: __slots__ = () - def _binary_op(self, other, f, reflexive=False): + def _binary_op( + self, other: GroupByCompatible, f: Callable, reflexive: bool = False + ) -> Dataset: raise NotImplementedError - def __add__(self, other): + def __add__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.add) - def __sub__(self, other): + def __sub__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.sub) - def __mul__(self, other): + def __mul__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.mul) - def __pow__(self, other): + def __pow__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.pow) - def __truediv__(self, other): + def __truediv__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.truediv) - def 
__floordiv__(self, other): + def __floordiv__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.floordiv) - def __mod__(self, other): + def __mod__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.mod) - def __and__(self, other): + def __and__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.and_) - def __xor__(self, other): + def __xor__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.xor) - def __or__(self, other): + def __or__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.or_) - def __lshift__(self, other): + def __lshift__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.lshift) - def __rshift__(self, other): + def __rshift__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.rshift) - def __lt__(self, other): + def __lt__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.lt) - def __le__(self, other): + def __le__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.le) - def __gt__(self, other): + def __gt__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.gt) - def __ge__(self, other): + def __ge__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.ge) - def __eq__(self, other): + def __eq__(self, other: GroupByCompatible) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_eq) - def __ne__(self, other): + def __ne__(self, other: GroupByCompatible) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_ne) - def __radd__(self, other): + def __radd__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.add, reflexive=True) - def __rsub__(self, other): + def __rsub__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.sub, reflexive=True) - def __rmul__(self, other): + def __rmul__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.mul, reflexive=True) - def __rpow__(self, other): + def __rpow__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.pow, reflexive=True) - def __rtruediv__(self, other): + def __rtruediv__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.truediv, reflexive=True) - def __rfloordiv__(self, other): + def __rfloordiv__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.floordiv, reflexive=True) - def __rmod__(self, other): + def __rmod__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.mod, reflexive=True) - def __rand__(self, other): + def __rand__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.and_, reflexive=True) - def __rxor__(self, other): + def __rxor__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.xor, reflexive=True) - def __ror__(self, other): + def __ror__(self, other: GroupByCompatible) -> Dataset: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ @@ -747,91 +914,93 @@ def __ror__(self, other): class DataArrayGroupByOpsMixin: __slots__ = () - def _binary_op(self, other, f, reflexive=False): + def _binary_op( + self, other: T_Xarray, f: Callable, reflexive: bool = False + ) -> T_Xarray: raise 
NotImplementedError - def __add__(self, other): + def __add__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add) - def __sub__(self, other): + def __sub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub) - def __mul__(self, other): + def __mul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul) - def __pow__(self, other): + def __pow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow) - def __truediv__(self, other): + def __truediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv) - def __floordiv__(self, other): + def __floordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv) - def __mod__(self, other): + def __mod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod) - def __and__(self, other): + def __and__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_) - def __xor__(self, other): + def __xor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor) - def __or__(self, other): + def __or__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_) - def __lshift__(self, other): + def __lshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lshift) - def __rshift__(self, other): + def __rshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.rshift) - def __lt__(self, other): + def __lt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lt) - def __le__(self, other): + def __le__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.le) - def __gt__(self, other): + def __gt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.gt) - def __ge__(self, other): + def __ge__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.ge) - def __eq__(self, other): + def __eq__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_eq) - def __ne__(self, other): + def __ne__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_ne) - def __radd__(self, other): + def __radd__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add, reflexive=True) - def __rsub__(self, other): + def __rsub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub, reflexive=True) - def __rmul__(self, other): + def __rmul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul, reflexive=True) - def __rpow__(self, other): + def __rpow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow, reflexive=True) - def __rtruediv__(self, other): + def __rtruediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv, reflexive=True) - def __rfloordiv__(self, other): + def __rfloordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv, reflexive=True) - def __rmod__(self, other): + def __rmod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod, reflexive=True) - def __rand__(self, other): + def __rand__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_, reflexive=True) - def __rxor__(self, other): + def __rxor__(self, other: T_Xarray) -> T_Xarray: return 
self._binary_op(other, operator.xor, reflexive=True) - def __ror__(self, other): + def __ror__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ diff --git a/xarray/core/_typed_ops.pyi b/xarray/core/_typed_ops.pyi deleted file mode 100644 index 9e2ba2d3a06..00000000000 --- a/xarray/core/_typed_ops.pyi +++ /dev/null @@ -1,782 +0,0 @@ -"""Stub file for mixin classes with arithmetic operators.""" -# This file was generated using xarray.util.generate_ops. Do not edit manually. - -from typing import NoReturn, TypeVar, overload - -import numpy as np -from numpy.typing import ArrayLike - -from .dataarray import DataArray -from .dataset import Dataset -from .groupby import DataArrayGroupBy, DatasetGroupBy, GroupBy -from .types import ( - DaCompatible, - DsCompatible, - GroupByIncompatible, - ScalarOrArray, - VarCompatible, -) -from .variable import Variable - -try: - from dask.array import Array as DaskArray -except ImportError: - DaskArray = np.ndarray # type: ignore - -# DatasetOpsMixin etc. are parent classes of Dataset etc. -# Because of https://github.com/pydata/xarray/issues/5755, we redefine these. Generally -# we use the ones in `types`. (We're open to refining this, and potentially integrating -# the `py` & `pyi` files to simplify them.) -T_Dataset = TypeVar("T_Dataset", bound="DatasetOpsMixin") -T_DataArray = TypeVar("T_DataArray", bound="DataArrayOpsMixin") -T_Variable = TypeVar("T_Variable", bound="VariableOpsMixin") - -class DatasetOpsMixin: - __slots__ = () - def _binary_op(self, other, f, reflexive=...): ... - def __add__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __sub__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __mul__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __pow__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __truediv__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __floordiv__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __mod__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __and__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __xor__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __or__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __lshift__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rshift__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __lt__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __le__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __gt__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __ge__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __eq__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... # type: ignore[override] - def __ne__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... # type: ignore[override] - def __radd__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rsub__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rmul__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rpow__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rtruediv__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rfloordiv__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rmod__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __rand__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... 
- def __rxor__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def __ror__(self: T_Dataset, other: DsCompatible) -> T_Dataset: ... - def _inplace_binary_op(self, other, f): ... - def _unary_op(self, f, *args, **kwargs): ... - def __neg__(self: T_Dataset) -> T_Dataset: ... - def __pos__(self: T_Dataset) -> T_Dataset: ... - def __abs__(self: T_Dataset) -> T_Dataset: ... - def __invert__(self: T_Dataset) -> T_Dataset: ... - def round(self: T_Dataset, *args, **kwargs) -> T_Dataset: ... - def argsort(self: T_Dataset, *args, **kwargs) -> T_Dataset: ... - def conj(self: T_Dataset, *args, **kwargs) -> T_Dataset: ... - def conjugate(self: T_Dataset, *args, **kwargs) -> T_Dataset: ... - -class DataArrayOpsMixin: - __slots__ = () - def _binary_op(self, other, f, reflexive=...): ... - @overload - def __add__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __add__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __add__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __sub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __sub__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __sub__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __mul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mul__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __mul__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __pow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __pow__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __pow__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __truediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __truediv__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __truediv__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __floordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __floordiv__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __floordiv__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __mod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mod__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __mod__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __and__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __and__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __and__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __xor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __xor__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __xor__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __or__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __or__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __or__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __lshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lshift__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __lshift__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rshift__(self, other: "DatasetGroupBy") -> "Dataset": ... 
- @overload - def __rshift__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __lt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lt__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __lt__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __le__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __le__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __le__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __gt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __gt__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __gt__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __ge__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ge__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __ge__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload # type: ignore[override] - def __eq__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __eq__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __eq__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload # type: ignore[override] - def __ne__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ne__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __ne__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __radd__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __radd__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __radd__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rsub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rsub__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rsub__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rmul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmul__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rmul__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rpow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rpow__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rpow__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rtruediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rtruediv__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rtruediv__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rfloordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rfloordiv__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rfloordiv__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rmod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmod__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rmod__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rand__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rand__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __rand__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __rxor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rxor__(self, other: "DatasetGroupBy") -> "Dataset": ... 
- @overload - def __rxor__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - @overload - def __ror__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ror__(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def __ror__(self: T_DataArray, other: DaCompatible) -> T_DataArray: ... - def _inplace_binary_op(self, other, f): ... - def _unary_op(self, f, *args, **kwargs): ... - def __neg__(self: T_DataArray) -> T_DataArray: ... - def __pos__(self: T_DataArray) -> T_DataArray: ... - def __abs__(self: T_DataArray) -> T_DataArray: ... - def __invert__(self: T_DataArray) -> T_DataArray: ... - def round(self: T_DataArray, *args, **kwargs) -> T_DataArray: ... - def argsort(self: T_DataArray, *args, **kwargs) -> T_DataArray: ... - def conj(self: T_DataArray, *args, **kwargs) -> T_DataArray: ... - def conjugate(self: T_DataArray, *args, **kwargs) -> T_DataArray: ... - -class VariableOpsMixin: - __slots__ = () - def _binary_op(self, other, f, reflexive=...): ... - @overload - def __add__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __add__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __add__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __sub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __sub__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __sub__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __mul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mul__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __mul__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __pow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __pow__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __pow__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __truediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __truediv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __truediv__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __floordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __floordiv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __floordiv__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __mod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mod__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __mod__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __and__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __and__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __and__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __xor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __xor__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __xor__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __or__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __or__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __or__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __lshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lshift__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __lshift__(self: T_Variable, other: VarCompatible) -> T_Variable: ... 
- @overload - def __rshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rshift__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rshift__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __lt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lt__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __lt__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __le__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __le__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __le__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __gt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __gt__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __gt__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __ge__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ge__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ge__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload # type: ignore[override] - def __eq__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __eq__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __eq__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload # type: ignore[override] - def __ne__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ne__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ne__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __radd__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __radd__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __radd__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rsub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rsub__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rsub__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rmul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmul__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rmul__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rpow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rpow__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rpow__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rtruediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rtruediv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rtruediv__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rfloordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rfloordiv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rfloordiv__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rmod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmod__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rmod__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rand__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rand__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rand__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __rxor__(self, other: T_Dataset) -> T_Dataset: ... 
- @overload - def __rxor__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rxor__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - @overload - def __ror__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ror__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ror__(self: T_Variable, other: VarCompatible) -> T_Variable: ... - def _inplace_binary_op(self, other, f): ... - def _unary_op(self, f, *args, **kwargs): ... - def __neg__(self: T_Variable) -> T_Variable: ... - def __pos__(self: T_Variable) -> T_Variable: ... - def __abs__(self: T_Variable) -> T_Variable: ... - def __invert__(self: T_Variable) -> T_Variable: ... - def round(self: T_Variable, *args, **kwargs) -> T_Variable: ... - def argsort(self: T_Variable, *args, **kwargs) -> T_Variable: ... - def conj(self: T_Variable, *args, **kwargs) -> T_Variable: ... - def conjugate(self: T_Variable, *args, **kwargs) -> T_Variable: ... - -class DatasetGroupByOpsMixin: - __slots__ = () - def _binary_op(self, other, f, reflexive=...): ... - @overload - def __add__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __add__(self, other: "DataArray") -> "Dataset": ... - @overload - def __add__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __sub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __sub__(self, other: "DataArray") -> "Dataset": ... - @overload - def __sub__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __mul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mul__(self, other: "DataArray") -> "Dataset": ... - @overload - def __mul__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __pow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __pow__(self, other: "DataArray") -> "Dataset": ... - @overload - def __pow__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __truediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __truediv__(self, other: "DataArray") -> "Dataset": ... - @overload - def __truediv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __floordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __floordiv__(self, other: "DataArray") -> "Dataset": ... - @overload - def __floordiv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __mod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mod__(self, other: "DataArray") -> "Dataset": ... - @overload - def __mod__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __and__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __and__(self, other: "DataArray") -> "Dataset": ... - @overload - def __and__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __xor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __xor__(self, other: "DataArray") -> "Dataset": ... - @overload - def __xor__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __or__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __or__(self, other: "DataArray") -> "Dataset": ... - @overload - def __or__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __lshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lshift__(self, other: "DataArray") -> "Dataset": ... - @overload - def __lshift__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rshift__(self, other: T_Dataset) -> T_Dataset: ... 
- @overload - def __rshift__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rshift__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __lt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lt__(self, other: "DataArray") -> "Dataset": ... - @overload - def __lt__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __le__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __le__(self, other: "DataArray") -> "Dataset": ... - @overload - def __le__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __gt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __gt__(self, other: "DataArray") -> "Dataset": ... - @overload - def __gt__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __ge__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ge__(self, other: "DataArray") -> "Dataset": ... - @overload - def __ge__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload # type: ignore[override] - def __eq__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __eq__(self, other: "DataArray") -> "Dataset": ... - @overload - def __eq__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload # type: ignore[override] - def __ne__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ne__(self, other: "DataArray") -> "Dataset": ... - @overload - def __ne__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __radd__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __radd__(self, other: "DataArray") -> "Dataset": ... - @overload - def __radd__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rsub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rsub__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rsub__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rmul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmul__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rmul__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rpow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rpow__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rpow__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rtruediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rtruediv__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rtruediv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rfloordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rfloordiv__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rfloordiv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rmod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmod__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rmod__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rand__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rand__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rand__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rxor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rxor__(self, other: "DataArray") -> "Dataset": ... - @overload - def __rxor__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __ror__(self, other: T_Dataset) -> T_Dataset: ... 
- @overload - def __ror__(self, other: "DataArray") -> "Dataset": ... - @overload - def __ror__(self, other: GroupByIncompatible) -> NoReturn: ... - -class DataArrayGroupByOpsMixin: - __slots__ = () - def _binary_op(self, other, f, reflexive=...): ... - @overload - def __add__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __add__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __add__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __sub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __sub__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __sub__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __mul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mul__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __mul__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __pow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __pow__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __pow__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __truediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __truediv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __truediv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __floordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __floordiv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __floordiv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __mod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __mod__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __mod__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __and__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __and__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __and__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __xor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __xor__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __xor__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __or__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __or__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __or__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __lshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lshift__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __lshift__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rshift__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rshift__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rshift__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __lt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __lt__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __lt__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __le__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __le__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __le__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __gt__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __gt__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __gt__(self, other: GroupByIncompatible) -> NoReturn: ... 
- @overload - def __ge__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ge__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ge__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload # type: ignore[override] - def __eq__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __eq__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __eq__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload # type: ignore[override] - def __ne__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ne__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ne__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __radd__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __radd__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __radd__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rsub__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rsub__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rsub__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rmul__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmul__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rmul__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rpow__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rpow__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rpow__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rtruediv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rtruediv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rtruediv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rfloordiv__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rfloordiv__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rfloordiv__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rmod__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rmod__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rmod__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rand__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rand__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rand__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __rxor__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __rxor__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __rxor__(self, other: GroupByIncompatible) -> NoReturn: ... - @overload - def __ror__(self, other: T_Dataset) -> T_Dataset: ... - @overload - def __ror__(self, other: T_DataArray) -> T_DataArray: ... - @overload - def __ror__(self, other: GroupByIncompatible) -> NoReturn: ... 
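The stub file removed above encoded the operator return types as typed overloads; after this series the generated ``_typed_ops.py`` carries the same annotations inline. A minimal, self-contained sketch of the pattern — using hypothetical ``Variable``/``DataArray`` stand-ins, not xarray's real classes — showing how a ``NoReturn`` overload steers type checkers to the reflexive method on the other operand, mirroring the runtime ``NotImplemented`` fallback::

    from __future__ import annotations

    from typing import NoReturn, Union, overload

    class Variable:
        def __init__(self, data: float) -> None:
            self.data = data

        @overload
        def __add__(self, other: DataArray) -> NoReturn: ...
        @overload
        def __add__(self, other: Union[Variable, float]) -> Variable: ...
        def __add__(self, other):
            if isinstance(other, DataArray):
                # At runtime, defer to DataArray.__radd__ via the
                # NotImplemented protocol.
                return NotImplemented
            val = other.data if isinstance(other, Variable) else other
            return Variable(self.data + val)

    class DataArray:
        def __init__(self, variable: Variable) -> None:
            self.variable = variable

        def __radd__(self, other: Union[Variable, float]) -> DataArray:
            # The reflexive method that both mypy and the runtime fall back to.
            return DataArray(self.variable + other)

    arr = Variable(1.0) + DataArray(Variable(2.0))  # checkers infer DataArray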
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 724a5fc2580..0b9786dc2b7 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -4618,25 +4618,22 @@ def _unary_op(self, f: Callable, *args, **kwargs) -> Self: return da def _binary_op( - self, - other: T_Xarray, - f: Callable, - reflexive: bool = False, - ) -> T_Xarray: + self, other: DaCompatible, f: Callable, reflexive: bool = False + ) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, (Dataset, GroupBy)): return NotImplemented if isinstance(other, DataArray): align_type = OPTIONS["arithmetic_join"] - self, other = align(self, other, join=align_type, copy=False) - other_variable = getattr(other, "variable", other) + self, other = align(self, other, join=align_type, copy=False) # type: ignore[type-var,assignment] + other_variable_or_arraylike: DaCompatible = getattr(other, "variable", other) other_coords = getattr(other, "coords", None) variable = ( - f(self.variable, other_variable) + f(self.variable, other_variable_or_arraylike) if not reflexive - else f(other_variable, self.variable) + else f(other_variable_or_arraylike, self.variable) ) coords, indexes = self.coords._merge_raw(other_coords, reflexive) name = self._result_name(other) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 44016e87306..d24a62414ea 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1475,13 +1475,20 @@ def __bool__(self) -> bool: def __iter__(self) -> Iterator[Hashable]: return iter(self.data_vars) - def __array__(self, dtype=None): - raise TypeError( - "cannot directly convert an xarray.Dataset into a " - "numpy array. Instead, create an xarray.DataArray " - "first, either with indexing on the Dataset or by " - "invoking the `to_array()` method." - ) + if TYPE_CHECKING: + # needed because __getattr__ is returning Any and otherwise + # this class counts as part of the SupportsArray Protocol + __array__ = None + + else: + + def __array__(self, dtype=None): + raise TypeError( + "cannot directly convert an xarray.Dataset into a " + "numpy array. Instead, create an xarray.DataArray " + "first, either with indexing on the Dataset or by " + "invoking the `to_array()` method." 
+ ) @property def nbytes(self) -> int: diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index c6911cbe65b..b85092982e3 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -61,6 +61,11 @@ class Rolling(Generic[T_Xarray]): __slots__ = ("obj", "window", "min_periods", "center", "dim") _attributes = ("window", "min_periods", "center", "dim") + dim: list[Hashable] + window: list[int] + center: list[bool] + obj: T_Xarray + min_periods: int def __init__( self, @@ -91,8 +96,8 @@ def __init__( ------- rolling : type of input argument """ - self.dim: list[Hashable] = [] - self.window: list[int] = [] + self.dim = [] + self.window = [] for d, w in windows.items(): self.dim.append(d) if w <= 0: @@ -100,7 +105,7 @@ def __init__( self.window.append(w) self.center = self._mapping_to_list(center, default=False) - self.obj: T_Xarray = obj + self.obj = obj missing_dims = tuple(dim for dim in self.dim if dim not in self.obj.dims) if missing_dims: @@ -814,6 +819,10 @@ class Coarsen(CoarsenArithmetic, Generic[T_Xarray]): ) _attributes = ("windows", "side", "trim_excess") obj: T_Xarray + windows: Mapping[Hashable, int] + side: SideOptions | Mapping[Hashable, SideOptions] + boundary: CoarsenBoundaryOptions + coord_func: Mapping[Hashable, str | Callable] def __init__( self, @@ -855,12 +864,15 @@ def __init__( f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} " f"dimensions {tuple(self.obj.dims)}" ) - if not utils.is_dict_like(coord_func): - coord_func = {d: coord_func for d in self.obj.dims} # type: ignore[misc] + + if utils.is_dict_like(coord_func): + coord_func_map = coord_func + else: + coord_func_map = {d: coord_func for d in self.obj.dims} for c in self.obj.coords: - if c not in coord_func: - coord_func[c] = duck_array_ops.mean # type: ignore[index] - self.coord_func: Mapping[Hashable, str | Callable] = coord_func + if c not in coord_func_map: + coord_func_map[c] = duck_array_ops.mean # type: ignore[index] + self.coord_func = coord_func_map def _get_keep_attrs(self, keep_attrs): if keep_attrs is None: diff --git a/xarray/core/types.py b/xarray/core/types.py index 6b6f9300631..073121b13b1 100644 --- a/xarray/core/types.py +++ b/xarray/core/types.py @@ -38,7 +38,6 @@ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset - from xarray.core.groupby import DataArrayGroupBy, GroupBy from xarray.core.indexes import Index, Indexes from xarray.core.utils import Frozen from xarray.core.variable import Variable @@ -176,10 +175,10 @@ def copy( T_DuckArray = TypeVar("T_DuckArray", bound=Any) ScalarOrArray = Union["ArrayLike", np.generic, np.ndarray, "DaskArray"] -DsCompatible = Union["Dataset", "DataArray", "Variable", "GroupBy", "ScalarOrArray"] -DaCompatible = Union["DataArray", "Variable", "DataArrayGroupBy", "ScalarOrArray"] VarCompatible = Union["Variable", "ScalarOrArray"] -GroupByIncompatible = Union["Variable", "GroupBy"] +DaCompatible = Union["DataArray", "VarCompatible"] +DsCompatible = Union["Dataset", "DaCompatible"] +GroupByCompatible = Union["Dataset", "DataArray"] Dims = Union[str, Iterable[Hashable], "ellipsis", None] OrderedDims = Union[str, Sequence[Union[Hashable, "ellipsis"]], "ellipsis", None] diff --git a/xarray/core/weighted.py b/xarray/core/weighted.py index 82ffe684ec7..b1ea1ee625c 100644 --- a/xarray/core/weighted.py +++ b/xarray/core/weighted.py @@ -324,6 +324,7 @@ def _weighted_quantile( def _get_h(n: float, q: np.ndarray, method: QUANTILE_METHODS) -> np.ndarray: 
"""Return the interpolation parameter.""" # Note that options are not yet exposed in the public API. + h: np.ndarray if method == "linear": h = (n - 1) * q + 1 elif method == "interpolated_inverted_cdf": diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py index e143e2b8e03..320ba999318 100644 --- a/xarray/tests/test_groupby.py +++ b/xarray/tests/test_groupby.py @@ -810,9 +810,9 @@ def test_groupby_math_more() -> None: with pytest.raises(TypeError, match=r"only support binary ops"): grouped + 1 # type: ignore[operator] with pytest.raises(TypeError, match=r"only support binary ops"): - grouped + grouped + grouped + grouped # type: ignore[operator] with pytest.raises(TypeError, match=r"in-place operations"): - ds += grouped + ds += grouped # type: ignore[arg-type] ds = Dataset( { diff --git a/xarray/tests/test_typed_ops.py b/xarray/tests/test_typed_ops.py new file mode 100644 index 00000000000..1d4ef89ae29 --- /dev/null +++ b/xarray/tests/test_typed_ops.py @@ -0,0 +1,246 @@ +import numpy as np + +from xarray import DataArray, Dataset, Variable + + +def test_variable_typed_ops() -> None: + """Tests for type checking of typed_ops on Variable""" + + var = Variable(dims=["t"], data=[1, 2, 3]) + + def _test(var: Variable) -> None: + # mypy checks the input type + assert isinstance(var, Variable) + + _int: int = 1 + _list = [1, 2, 3] + _ndarray = np.array([1, 2, 3]) + + # __add__ as an example of binary ops + _test(var + _int) + _test(var + _list) + _test(var + _ndarray) + _test(var + var) + + # __radd__ as an example of reflexive binary ops + _test(_int + var) + _test(_list + var) + _test(_ndarray + var) # type: ignore[arg-type] # numpy problem + + # __eq__ as an example of cmp ops + _test(var == _int) + _test(var == _list) + _test(var == _ndarray) + _test(_int == var) # type: ignore[arg-type] # typeshed problem + _test(_list == var) # type: ignore[arg-type] # typeshed problem + _test(_ndarray == var) + + # __lt__ as another example of cmp ops + _test(var < _int) + _test(var < _list) + _test(var < _ndarray) + _test(_int > var) + _test(_list > var) + _test(_ndarray > var) # type: ignore[arg-type] # numpy problem + + # __iadd__ as an example of inplace binary ops + var += _int + var += _list + var += _ndarray + + # __neg__ as an example of unary ops + _test(-var) + + +def test_dataarray_typed_ops() -> None: + """Tests for type checking of typed_ops on DataArray""" + + da = DataArray([1, 2, 3], dims=["t"]) + + def _test(da: DataArray) -> None: + # mypy checks the input type + assert isinstance(da, DataArray) + + _int: int = 1 + _list = [1, 2, 3] + _ndarray = np.array([1, 2, 3]) + _var = Variable(dims=["t"], data=[1, 2, 3]) + + # __add__ as an example of binary ops + _test(da + _int) + _test(da + _list) + _test(da + _ndarray) + _test(da + _var) + _test(da + da) + + # __radd__ as an example of reflexive binary ops + _test(_int + da) + _test(_list + da) + _test(_ndarray + da) # type: ignore[arg-type] # numpy problem + _test(_var + da) + + # __eq__ as an example of cmp ops + _test(da == _int) + _test(da == _list) + _test(da == _ndarray) + _test(da == _var) + _test(_int == da) # type: ignore[arg-type] # typeshed problem + _test(_list == da) # type: ignore[arg-type] # typeshed problem + _test(_ndarray == da) + _test(_var == da) + + # __lt__ as another example of cmp ops + _test(da < _int) + _test(da < _list) + _test(da < _ndarray) + _test(da < _var) + _test(_int > da) + _test(_list > da) + _test(_ndarray > da) # type: ignore[arg-type] # numpy problem + _test(_var > da) + + # __iadd__ 
as an example of inplace binary ops
+    da += _int
+    da += _list
+    da += _ndarray
+    da += _var
+
+    # __neg__ as an example of unary ops
+    _test(-da)
+
+
+def test_dataset_typed_ops() -> None:
+    """Tests for type checking of typed_ops on Dataset"""
+
+    ds = Dataset({"a": ("t", [1, 2, 3])})
+
+    def _test(ds: Dataset) -> None:
+        # mypy checks the input type
+        assert isinstance(ds, Dataset)
+
+    _int: int = 1
+    _list = [1, 2, 3]
+    _ndarray = np.array([1, 2, 3])
+    _var = Variable(dims=["t"], data=[1, 2, 3])
+    _da = DataArray([1, 2, 3], dims=["t"])
+
+    # __add__ as an example of binary ops
+    _test(ds + _int)
+    _test(ds + _list)
+    _test(ds + _ndarray)
+    _test(ds + _var)
+    _test(ds + _da)
+    _test(ds + ds)
+
+    # __radd__ as an example of reflexive binary ops
+    _test(_int + ds)
+    _test(_list + ds)
+    _test(_ndarray + ds)
+    _test(_var + ds)
+    _test(_da + ds)
+
+    # __eq__ as an example of cmp ops
+    _test(ds == _int)
+    _test(ds == _list)
+    _test(ds == _ndarray)
+    _test(ds == _var)
+    _test(ds == _da)
+    _test(_int == ds)  # type: ignore[arg-type]  # typeshed problem
+    _test(_list == ds)  # type: ignore[arg-type]  # typeshed problem
+    _test(_ndarray == ds)
+    _test(_var == ds)
+    _test(_da == ds)
+
+    # __lt__ as another example of cmp ops
+    _test(ds < _int)
+    _test(ds < _list)
+    _test(ds < _ndarray)
+    _test(ds < _var)
+    _test(ds < _da)
+    _test(_int > ds)
+    _test(_list > ds)
+    _test(_ndarray > ds)  # type: ignore[arg-type]  # numpy problem
+    _test(_var > ds)
+    _test(_da > ds)
+
+    # __iadd__ as an example of inplace binary ops
+    ds += _int
+    ds += _list
+    ds += _ndarray
+    ds += _var
+    ds += _da
+
+    # __neg__ as an example of unary ops
+    _test(-ds)
+
+
+def test_dataarray_groupby_typed_ops() -> None:
+    """Tests for type checking of typed_ops on DataArrayGroupBy"""
+
+    da = DataArray([1, 2, 3], coords={"x": ("t", [1, 2, 2])}, dims=["t"])
+    grp = da.groupby("x")
+
+    def _testda(da: DataArray) -> None:
+        # mypy checks the input type
+        assert isinstance(da, DataArray)
+
+    def _testds(ds: Dataset) -> None:
+        # mypy checks the input type
+        assert isinstance(ds, Dataset)
+
+    _da = DataArray([5, 6], coords={"x": [1, 2]}, dims="x")
+    _ds = _da.to_dataset(name="a")
+
+    # __add__ as an example of binary ops
+    _testda(grp + _da)
+    _testds(grp + _ds)
+
+    # __radd__ as an example of reflexive binary ops
+    _testda(_da + grp)
+    _testds(_ds + grp)
+
+    # __eq__ as an example of cmp ops
+    _testda(grp == _da)
+    _testda(_da == grp)
+    _testds(grp == _ds)
+    _testds(_ds == grp)
+
+    # __lt__ as another example of cmp ops
+    _testda(grp < _da)
+    _testda(_da > grp)
+    _testds(grp < _ds)
+    _testds(_ds > grp)
+
+
+def test_dataset_groupby_typed_ops() -> None:
+    """Tests for type checking of typed_ops on DatasetGroupBy"""
+
+    ds = Dataset({"a": ("t", [1, 2, 3])}, coords={"x": ("t", [1, 2, 2])})
+    grp = ds.groupby("x")
+
+    def _test(ds: Dataset) -> None:
+        # mypy checks the input type
+        assert isinstance(ds, Dataset)
+
+    _da = DataArray([5, 6], coords={"x": [1, 2]}, dims="x")
+    _ds = _da.to_dataset(name="a")
+
+    # __add__ as an example of binary ops
+    _test(grp + _da)
+    _test(grp + _ds)
+
+    # __radd__ as an example of reflexive binary ops
+    _test(_da + grp)
+    _test(_ds + grp)
+
+    # __eq__ as an example of cmp ops
+    _test(grp == _da)
+    _test(_da == grp)
+    _test(grp == _ds)
+    _test(_ds == grp)
+
+    # __lt__ as another example of cmp ops
+    _test(grp < _da)
+    _test(_da > grp)
+    _test(grp < _ds)
+    _test(_ds > grp)
diff --git a/xarray/util/generate_ops.py b/xarray/util/generate_ops.py
index cf0673e7cca..632ca06d295 100644
--- a/xarray/util/generate_ops.py
+++
b/xarray/util/generate_ops.py @@ -3,14 +3,16 @@ For internal xarray development use only. Usage: - python xarray/util/generate_ops.py --module > xarray/core/_typed_ops.py - python xarray/util/generate_ops.py --stubs > xarray/core/_typed_ops.pyi + python xarray/util/generate_ops.py > xarray/core/_typed_ops.py """ # Note: the comments in https://github.com/pydata/xarray/pull/4904 provide some # background to some of the design choices made here. -import sys +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import Optional BINOPS_EQNE = (("__eq__", "nputils.array_eq"), ("__ne__", "nputils.array_ne")) BINOPS_CMP = ( @@ -74,155 +76,178 @@ ("conjugate", "ops.conjugate"), ) + +required_method_binary = """ + def _binary_op( + self, other: {other_type}, f: Callable, reflexive: bool = False + ) -> {return_type}: + raise NotImplementedError""" template_binop = """ - def {method}(self, other): + def {method}(self, other: {other_type}) -> {return_type}:{type_ignore} return self._binary_op(other, {func})""" +template_binop_overload = """ + @overload{overload_type_ignore} + def {method}(self, other: {overload_type}) -> NoReturn: + ... + + @overload + def {method}(self, other: {other_type}) -> {return_type}: + ... +""" template_reflexive = """ - def {method}(self, other): + def {method}(self, other: {other_type}) -> {return_type}: return self._binary_op(other, {func}, reflexive=True)""" + +required_method_inplace = """ + def _inplace_binary_op(self, other: {other_type}, f: Callable) -> Self: + raise NotImplementedError""" template_inplace = """ - def {method}(self, other): + def {method}(self, other: {other_type}) -> Self:{type_ignore} return self._inplace_binary_op(other, {func})""" + +required_method_unary = """ + def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: + raise NotImplementedError""" template_unary = """ - def {method}(self): + def {method}(self) -> Self: return self._unary_op({func})""" template_other_unary = """ - def {method}(self, *args, **kwargs): + def {method}(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op({func}, *args, **kwargs)""" -required_method_unary = """ - def _unary_op(self, f, *args, **kwargs): - raise NotImplementedError""" -required_method_binary = """ - def _binary_op(self, other, f, reflexive=False): - raise NotImplementedError""" -required_method_inplace = """ - def _inplace_binary_op(self, other, f): - raise NotImplementedError""" # For some methods we override return type `bool` defined by base class `object`. -OVERRIDE_TYPESHED = {"override": " # type: ignore[override]"} -NO_OVERRIDE = {"override": ""} - -# Note: in some of the overloads below the return value in reality is NotImplemented, -# which cannot accurately be expressed with type hints,e.g. Literal[NotImplemented] -# or type(NotImplemented) are not allowed and NoReturn has a different meaning. -# In such cases we are lending the type checkers a hand by specifying the return type -# of the corresponding reflexive method on `other` which will be called instead. -stub_ds = """\ - def {method}(self: T_Dataset, other: DsCompatible) -> T_Dataset: ...{override}""" -stub_da = """\ - @overload{override} - def {method}(self, other: T_Dataset) -> T_Dataset: ... - @overload - def {method}(self, other: "DatasetGroupBy") -> "Dataset": ... - @overload - def {method}(self: T_DataArray, other: DaCompatible) -> T_DataArray: ...""" -stub_var = """\ - @overload{override} - def {method}(self, other: T_Dataset) -> T_Dataset: ... 
-    @overload
-    def {method}(self, other: T_DataArray) -> T_DataArray: ...
-    @overload
-    def {method}(self: T_Variable, other: VarCompatible) -> T_Variable: ..."""
-stub_dsgb = """\
-    @overload{override}
-    def {method}(self, other: T_Dataset) -> T_Dataset: ...
-    @overload
-    def {method}(self, other: "DataArray") -> "Dataset": ...
-    @overload
-    def {method}(self, other: GroupByIncompatible) -> NoReturn: ..."""
-stub_dagb = """\
-    @overload{override}
-    def {method}(self, other: T_Dataset) -> T_Dataset: ...
-    @overload
-    def {method}(self, other: T_DataArray) -> T_DataArray: ...
-    @overload
-    def {method}(self, other: GroupByIncompatible) -> NoReturn: ..."""
-stub_unary = """\
-    def {method}(self: {self_type}) -> {self_type}: ..."""
-stub_other_unary = """\
-    def {method}(self: {self_type}, *args, **kwargs) -> {self_type}: ..."""
-stub_required_unary = """\
-    def _unary_op(self, f, *args, **kwargs): ..."""
-stub_required_binary = """\
-    def _binary_op(self, other, f, reflexive=...): ..."""
-stub_required_inplace = """\
-    def _inplace_binary_op(self, other, f): ..."""
-
-
-def unops(self_type):
-    extra_context = {"self_type": self_type}
+# We need to add "# type: ignore[override]"
+# Keep an eye out for:
+# https://discuss.python.org/t/make-type-hints-for-eq-of-primitives-less-strict/34240
+# The type ignores might not be necessary anymore at some point.
+#
+# We require a "hack" to tell type checkers that e.g. Variable + DataArray = DataArray
+# In reality this returns NotImplemented, but this is not a valid type in Python 3.9.
+# Therefore, we use NoReturn which mypy seems to recognise!
+# TODO: change once Python 3.10 is the minimum.
+#
+# Mypy seems to require that __iadd__ and __add__ have the same signature.
+# This requires some extra type: ignores[misc] in the inplace methods :/
+
+
+def _type_ignore(ignore: str) -> str:
+    return f"  # type:ignore[{ignore}]" if ignore else ""
+
+
+FuncType = Sequence[tuple[Optional[str], Optional[str]]]
+OpsType = tuple[FuncType, str, dict[str, str]]
+
+
+def binops(
+    other_type: str, return_type: str = "Self", type_ignore_eq: str = "override"
+) -> list[OpsType]:
+    extras = {"other_type": other_type, "return_type": return_type}
     return [
-        ([(None, None)], required_method_unary, stub_required_unary, {}),
-        (UNARY_OPS, template_unary, stub_unary, extra_context),
-        (OTHER_UNARY_METHODS, template_other_unary, stub_other_unary, extra_context),
+        ([(None, None)], required_method_binary, extras),
+        (BINOPS_NUM + BINOPS_CMP, template_binop, extras | {"type_ignore": ""}),
+        (
+            BINOPS_EQNE,
+            template_binop,
+            extras | {"type_ignore": _type_ignore(type_ignore_eq)},
+        ),
+        (BINOPS_REFLEXIVE, template_reflexive, extras),
     ]


-def binops(stub=""):
+def binops_overload(
+    other_type: str,
+    overload_type: str,
+    return_type: str = "Self",
+    type_ignore_eq: str = "override",
+) -> list[OpsType]:
+    extras = {"other_type": other_type, "return_type": return_type}
     return [
-        ([(None, None)], required_method_binary, stub_required_binary, {}),
-        (BINOPS_NUM + BINOPS_CMP, template_binop, stub, NO_OVERRIDE),
-        (BINOPS_EQNE, template_binop, stub, OVERRIDE_TYPESHED),
-        (BINOPS_REFLEXIVE, template_reflexive, stub, NO_OVERRIDE),
+        ([(None, None)], required_method_binary, extras),
+        (
+            BINOPS_NUM + BINOPS_CMP,
+            template_binop_overload + template_binop,
+            extras
+            | {
+                "overload_type": overload_type,
+                "type_ignore": "",
+                "overload_type_ignore": "",
+            },
+        ),
+        (
+            BINOPS_EQNE,
+            template_binop_overload + template_binop,
+            extras
+            | {
+                "overload_type": overload_type,
"type_ignore": "", + "overload_type_ignore": _type_ignore(type_ignore_eq), + }, + ), + (BINOPS_REFLEXIVE, template_reflexive, extras), ] -def inplace(): +def inplace(other_type: str, type_ignore: str = "") -> list[OpsType]: + extras = {"other_type": other_type} return [ - ([(None, None)], required_method_inplace, stub_required_inplace, {}), - (BINOPS_INPLACE, template_inplace, "", {}), + ([(None, None)], required_method_inplace, extras), + ( + BINOPS_INPLACE, + template_inplace, + extras | {"type_ignore": _type_ignore(type_ignore)}, + ), + ] + + +def unops() -> list[OpsType]: + return [ + ([(None, None)], required_method_unary, {}), + (UNARY_OPS, template_unary, {}), + (OTHER_UNARY_METHODS, template_other_unary, {}), ] ops_info = {} -ops_info["DatasetOpsMixin"] = binops(stub_ds) + inplace() + unops("T_Dataset") -ops_info["DataArrayOpsMixin"] = binops(stub_da) + inplace() + unops("T_DataArray") -ops_info["VariableOpsMixin"] = binops(stub_var) + inplace() + unops("T_Variable") -ops_info["DatasetGroupByOpsMixin"] = binops(stub_dsgb) -ops_info["DataArrayGroupByOpsMixin"] = binops(stub_dagb) +ops_info["DatasetOpsMixin"] = ( + binops(other_type="DsCompatible") + inplace(other_type="DsCompatible") + unops() +) +ops_info["DataArrayOpsMixin"] = ( + binops(other_type="DaCompatible") + inplace(other_type="DaCompatible") + unops() +) +ops_info["VariableOpsMixin"] = ( + binops_overload(other_type="VarCompatible", overload_type="T_DataArray") + + inplace(other_type="VarCompatible", type_ignore="misc") + + unops() +) +ops_info["DatasetGroupByOpsMixin"] = binops( + other_type="GroupByCompatible", return_type="Dataset" +) +ops_info["DataArrayGroupByOpsMixin"] = binops( + other_type="T_Xarray", return_type="T_Xarray" +) MODULE_PREAMBLE = '''\ """Mixin classes with arithmetic operators.""" # This file was generated using xarray.util.generate_ops. Do not edit manually. -import operator - -from . import nputils, ops''' - -STUBFILE_PREAMBLE = '''\ -"""Stub file for mixin classes with arithmetic operators.""" -# This file was generated using xarray.util.generate_ops. Do not edit manually. - -from typing import NoReturn, TypeVar, overload +from __future__ import annotations -import numpy as np -from numpy.typing import ArrayLike +import operator +from typing import TYPE_CHECKING, Any, Callable, NoReturn, overload -from .dataarray import DataArray -from .dataset import Dataset -from .groupby import DataArrayGroupBy, DatasetGroupBy, GroupBy -from .types import ( +from xarray.core import nputils, ops +from xarray.core.types import ( DaCompatible, DsCompatible, - GroupByIncompatible, - ScalarOrArray, + GroupByCompatible, + Self, + T_DataArray, + T_Xarray, VarCompatible, ) -from .variable import Variable -try: - from dask.array import Array as DaskArray -except ImportError: - DaskArray = np.ndarray # type: ignore - -# DatasetOpsMixin etc. are parent classes of Dataset etc. -# Because of https://github.com/pydata/xarray/issues/5755, we redefine these. Generally -# we use the ones in `types`. (We're open to refining this, and potentially integrating -# the `py` & `pyi` files to simplify them.) 
-T_Dataset = TypeVar("T_Dataset", bound="DatasetOpsMixin") -T_DataArray = TypeVar("T_DataArray", bound="DataArrayOpsMixin") -T_Variable = TypeVar("T_Variable", bound="VariableOpsMixin")''' +if TYPE_CHECKING: + from xarray.core.dataset import Dataset''' CLASS_PREAMBLE = """{newline} @@ -233,35 +258,28 @@ class {cls_name}: {method}.__doc__ = {func}.__doc__""" -def render(ops_info, is_module): +def render(ops_info: dict[str, list[OpsType]]) -> Iterator[str]: """Render the module or stub file.""" - yield MODULE_PREAMBLE if is_module else STUBFILE_PREAMBLE + yield MODULE_PREAMBLE for cls_name, method_blocks in ops_info.items(): - yield CLASS_PREAMBLE.format(cls_name=cls_name, newline="\n" * is_module) - yield from _render_classbody(method_blocks, is_module) + yield CLASS_PREAMBLE.format(cls_name=cls_name, newline="\n") + yield from _render_classbody(method_blocks) -def _render_classbody(method_blocks, is_module): - for method_func_pairs, method_template, stub_template, extra in method_blocks: - template = method_template if is_module else stub_template +def _render_classbody(method_blocks: list[OpsType]) -> Iterator[str]: + for method_func_pairs, template, extra in method_blocks: if template: for method, func in method_func_pairs: yield template.format(method=method, func=func, **extra) - if is_module: - yield "" - for method_func_pairs, *_ in method_blocks: - for method, func in method_func_pairs: - if method and func: - yield COPY_DOCSTRING.format(method=method, func=func) + yield "" + for method_func_pairs, *_ in method_blocks: + for method, func in method_func_pairs: + if method and func: + yield COPY_DOCSTRING.format(method=method, func=func) if __name__ == "__main__": - option = sys.argv[1].lower() if len(sys.argv) == 2 else None - if option not in {"--module", "--stubs"}: - raise SystemExit(f"Usage: {sys.argv[0]} --module | --stubs") - is_module = option == "--module" - - for line in render(ops_info, is_module): + for line in render(ops_info): print(line) From bac90ab067d7437de875ec57d90d863169e70429 Mon Sep 17 00:00:00 2001 From: Justus Magin Date: Mon, 25 Sep 2023 06:46:48 +0200 Subject: [PATCH 14/17] adapt to NEP 51 (#8064) * convert string and bytes items to standard python types * [test-upstream] * modify the expected error message --- xarray/core/formatting.py | 2 ++ xarray/tests/test_dataset.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index 3bfe902f0a3..942bf5891ca 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -160,6 +160,8 @@ def format_item(x, timedelta_format=None, quote_strings=True): if isinstance(x, (np.timedelta64, timedelta)): return format_timedelta(x, timedelta_format=timedelta_format) elif isinstance(x, (str, bytes)): + if hasattr(x, "dtype"): + x = x.item() return repr(x) if quote_strings else x elif hasattr(x, "dtype") and np.issubdtype(x.dtype, np.floating): return f"{x.item():.4}" diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index c832663ecff..3fb29e01ebb 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -4116,7 +4116,8 @@ def test_setitem(self) -> None: data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values data5 = data4.astype(str) data5["var4"] = data4["var1"] - err_msg = "could not convert string to float: 'a'" + # convert to `np.str_('a')` once `numpy<2.0` has been dropped + err_msg = "could not convert string to float: .*'a'.*" with pytest.raises(ValueError, match=err_msg): 
data5[{"dim2": 1}] = "a" From da647b06312bd93c3412ddd712bf7ecb52e3f28b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kai=20M=C3=BChlbauer?= Date: Mon, 25 Sep 2023 13:30:33 +0200 Subject: [PATCH 15/17] decode variable with mismatched coordinate attribute (#8195) * decode variable with mismatched coordinate attribute, warn/raise meaningful error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xarray/conventions.py Co-authored-by: Illviljan <14371165+Illviljan@users.noreply.github.com> * use set comparison as suggested by review * use emit_user_level_warning for all occurrences * fix typing and docstring * fix typing and docstring * silently ignore names of missing variables as per review * only decode if there is at least one variable matching a coordinate * fix typing * update docstring * Apply suggestions from code review Co-authored-by: Deepak Cherian * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add review suggestion Co-authored-by: Deepak Cherian * Fix test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Illviljan <14371165+Illviljan@users.noreply.github.com> Co-authored-by: Deepak Cherian --- doc/whats-new.rst | 2 ++ xarray/backends/api.py | 6 ++++ xarray/conventions.py | 54 ++++++++++++++++---------------- xarray/tests/test_conventions.py | 27 +++++++++++++++- 4 files changed, 61 insertions(+), 28 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 4307c2829ca..c37a3213793 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -87,6 +87,8 @@ Bug fixes issues (:issue:`7817`, :issue:`7942`, :issue:`7790`, :issue:`6191`, :issue:`7096`, :issue:`1064`, :pull:`7827`). By `Kai Mühlbauer `_. +- Fixed a bug where inaccurate ``coordinates`` silently failed to decode variable (:issue:`1809`, :pull:`8195`). + By `Kai Mühlbauer `_ - ``.rolling_exp`` functions no longer mistakenly lose non-dimensioned coords (:issue:`6528`, :pull:`8114`) By `Maximilian Roos `_. diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 58a05aeddce..7ca4377e4cf 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -488,6 +488,9 @@ def open_dataset( as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. + + Only existing variables can be set as coordinates. Missing variables + will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or @@ -691,6 +694,9 @@ def open_dataarray( as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. + + Only existing variables can be set as coordinates. Missing variables + will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. 
This may be useful to drop variables with problems or diff --git a/xarray/conventions.py b/xarray/conventions.py index 596831e270a..cf207f0c37a 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -1,9 +1,8 @@ from __future__ import annotations -import warnings from collections import defaultdict from collections.abc import Hashable, Iterable, Mapping, MutableMapping -from typing import TYPE_CHECKING, Any, Union +from typing import TYPE_CHECKING, Any, Literal, Union import numpy as np import pandas as pd @@ -16,6 +15,7 @@ contains_cftime_datetimes, ) from xarray.core.pycompat import is_duck_dask_array +from xarray.core.utils import emit_user_level_warning from xarray.core.variable import IndexVariable, Variable CF_RELATED_DATA = ( @@ -111,13 +111,13 @@ def ensure_dtype_not_object(var: Variable, name: T_Name = None) -> Variable: return var if is_duck_dask_array(data): - warnings.warn( + emit_user_level_warning( f"variable {name} has data in the form of a dask array with " "dtype=object, which means it is being loaded into memory " "to determine a data type that can be safely stored on disk. " "To avoid this, coerce this variable to a fixed-size dtype " "with astype() before saving it.", - SerializationWarning, + category=SerializationWarning, ) data = data.compute() @@ -354,15 +354,14 @@ def _update_bounds_encoding(variables: T_Variables) -> None: and "bounds" in attrs and attrs["bounds"] in variables ): - warnings.warn( - "Variable '{0}' has datetime type and a " - "bounds variable but {0}.encoding does not have " - "units specified. The units encodings for '{0}' " - "and '{1}' will be determined independently " + emit_user_level_warning( + f"Variable {name:s} has datetime type and a " + f"bounds variable but {name:s}.encoding does not have " + f"units specified. The units encodings for {name:s} " + f"and {attrs['bounds']} will be determined independently " "and may not be equal, counter to CF-conventions. 
" "If this is a concern, specify a units encoding for " - "'{0}' before writing to a file.".format(name, attrs["bounds"]), - UserWarning, + f"{name:s} before writing to a file.", ) if has_date_units and "bounds" in attrs: @@ -379,7 +378,7 @@ def decode_cf_variables( concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool = True, - decode_coords: bool = True, + decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | None = None, decode_timedelta: bool | None = None, @@ -441,11 +440,14 @@ def stackable(dim: Hashable) -> bool: if decode_coords in [True, "coordinates", "all"]: var_attrs = new_vars[k].attrs if "coordinates" in var_attrs: - coord_str = var_attrs["coordinates"] - var_coord_names = coord_str.split() - if all(k in variables for k in var_coord_names): - new_vars[k].encoding["coordinates"] = coord_str - del var_attrs["coordinates"] + var_coord_names = [ + c for c in var_attrs["coordinates"].split() if c in variables + ] + # propagate as is + new_vars[k].encoding["coordinates"] = var_attrs["coordinates"] + del var_attrs["coordinates"] + # but only use as coordinate if existing + if var_coord_names: coord_names.update(var_coord_names) if decode_coords == "all": @@ -461,8 +463,8 @@ def stackable(dim: Hashable) -> bool: for role_or_name in part.split() ] if len(roles_and_names) % 2 == 1: - warnings.warn( - f"Attribute {attr_name:s} malformed", stacklevel=5 + emit_user_level_warning( + f"Attribute {attr_name:s} malformed" ) var_names = roles_and_names[1::2] if all(var_name in variables for var_name in var_names): @@ -474,9 +476,8 @@ def stackable(dim: Hashable) -> bool: for proj_name in var_names if proj_name not in variables ] - warnings.warn( + emit_user_level_warning( f"Variable(s) referenced in {attr_name:s} not in variables: {referenced_vars_not_in_variables!s}", - stacklevel=5, ) del var_attrs[attr_name] @@ -493,7 +494,7 @@ def decode_cf( concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool = True, - decode_coords: bool = True, + decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | None = None, decode_timedelta: bool | None = None, @@ -632,12 +633,11 @@ def _encode_coordinates(variables, attributes, non_dim_coord_names): for name in list(non_dim_coord_names): if isinstance(name, str) and " " in name: - warnings.warn( + emit_user_level_warning( f"coordinate {name!r} has a space in its name, which means it " "cannot be marked as a coordinate on disk and will be " "saved as a data variable instead", - SerializationWarning, - stacklevel=6, + category=SerializationWarning, ) non_dim_coord_names.discard(name) @@ -710,11 +710,11 @@ def _encode_coordinates(variables, attributes, non_dim_coord_names): if global_coordinates: attributes = dict(attributes) if "coordinates" in attributes: - warnings.warn( + emit_user_level_warning( f"cannot serialize global coordinates {global_coordinates!r} because the global " f"attribute 'coordinates' already exists. 
diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py
index 4dae7809be9..5157688b629 100644
--- a/xarray/tests/test_conventions.py
+++ b/xarray/tests/test_conventions.py
@@ -80,6 +80,28 @@ def test_decode_cf_with_conflicting_fill_missing_value() -> None:
     assert_identical(actual, expected)
 
 
+def test_decode_cf_variable_with_mismatched_coordinates() -> None:
+    # tests for decoding mismatched coordinates attributes
+    # see GH #1809
+    zeros1 = np.zeros((1, 5, 3))
+    orig = Dataset(
+        {
+            "XLONG": (["x", "y"], zeros1.squeeze(0), {}),
+            "XLAT": (["x", "y"], zeros1.squeeze(0), {}),
+            "foo": (["time", "x", "y"], zeros1, {"coordinates": "XTIME XLONG XLAT"}),
+            "time": ("time", [0.0], {"units": "hours since 2017-01-01"}),
+        }
+    )
+    decoded = conventions.decode_cf(orig, decode_coords=True)
+    assert decoded["foo"].encoding["coordinates"] == "XTIME XLONG XLAT"
+    assert list(decoded.coords.keys()) == ["XLONG", "XLAT", "time"]
+
+    decoded = conventions.decode_cf(orig, decode_coords=False)
+    assert "coordinates" not in decoded["foo"].encoding
+    assert decoded["foo"].attrs.get("coordinates") == "XTIME XLONG XLAT"
+    assert list(decoded.coords.keys()) == ["time"]
+
+
 @requires_cftime
 class TestEncodeCFVariable:
     def test_incompatible_attributes(self) -> None:
@@ -246,9 +268,12 @@ def test_dataset(self) -> None:
         assert_identical(expected, actual)
 
     def test_invalid_coordinates(self) -> None:
-        # regression test for GH308
+        # regression test for GH308, GH1809
         original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})})
+        decoded = Dataset({"foo": ("t", [1, 2], {}, {"coordinates": "invalid"})})
         actual = conventions.decode_cf(original)
+        assert_identical(decoded, actual)
+        actual = conventions.decode_cf(original, decode_coords=False)
         assert_identical(original, actual)
 
     def test_decode_coordinates(self) -> None:
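Taken together with the ``decode_coords: bool | Literal["coordinates", "all"]``
signatures above, the behavior these tests exercise can be summarized from the
public API roughly as follows; a sketch with made-up variable names, assuming
the patched semantics:

    import numpy as np
    import xarray as xr

    # "lat" exists as a variable, "lon" does not: an inaccurate attribute.
    ds = xr.Dataset(
        {
            "foo": (("x", "y"), np.zeros((2, 3)), {"coordinates": "lon lat"}),
            "lat": (("x", "y"), np.zeros((2, 3))),
        }
    )

    decoded = xr.decode_cf(ds, decode_coords=True)
    # The attribute is propagated to encoding verbatim ...
    assert decoded["foo"].encoding["coordinates"] == "lon lat"
    # ... but only the name that actually resolves is promoted to a coordinate.
    assert "lat" in decoded.coords and "lon" not in decoded.coords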
From ba7f2d5dc8a6f77c91a3bb9e54b5ca39abcdb939 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kai=20M=C3=BChlbauer?=
Date: Tue, 26 Sep 2023 10:12:44 +0200
Subject: [PATCH 16/17] Release 2023.09.0 (#8229)

---
 doc/whats-new.rst | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index c37a3213793..2b0d2e151c5 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -14,10 +14,21 @@ What's New
 
     np.random.seed(123456)
 
-.. _whats-new.2023.08.1:
+.. _whats-new.2023.09.0:
 
-v2023.08.1 (unreleased)
------------------------
+v2023.09.0 (Sep 26, 2023)
+-------------------------
+
+This release continues work on the new :py:class:`xarray.Coordinates` object, allows providing `preferred_chunks` when
+reading from netcdf files, enables :py:func:`xarray.apply_ufunc` to handle missing core dimensions and fixes several bugs.
+
+Thanks to the 24 contributors to this release: Alexander Fischer, Amrest Chinkamol, Benoit Bovy, Darsh Ranjan, Deepak Cherian,
+Gianfranco Costamagna, Gregorio L. Trevisan, Illviljan, Joe Hamman, JR, Justus Magin, Kai Mühlbauer, Kian-Meng Ang, Kyle Sunden,
+Martin Raspaud, Mathias Hauser, Mattia Almansi, Maximilian Roos, András Gunyhó, Michael Niklas, Richard Kleijn, Riulinchen,
+Tom Nicholas and Wiktor Kraśnicki.
+
+We welcome the following new contributors to Xarray!: Alexander Fischer, Amrest Chinkamol, Darsh Ranjan, Gianfranco Costamagna, Gregorio L. Trevisan,
+Kian-Meng Ang, Riulinchen and Wiktor Kraśnicki.
 
 New Features
 ~~~~~~~~~~~~
@@ -28,11 +39,8 @@ New Features
   By `Benoît Bovy `_.
 - Provide `preferred_chunks` for data read from netcdf files (:issue:`1440`, :pull:`7948`).
   By `Martin Raspaud `_.
-- Improved static typing of reduction methods (:pull:`6746`).
-  By `Richard Kleijn `_.
 - Added `on_missing_core_dims` to :py:meth:`apply_ufunc` to allow for copying or
-  dropping a :py:class:`Dataset`'s variables with missing core dimensions.
-  (:pull:`8138`)
+  dropping a :py:class:`Dataset`'s variables with missing core dimensions (:pull:`8138`).
   By `Maximilian Roos `_.
 
 Breaking changes
 ~~~~~~~~~~~~~~~~
@@ -61,6 +69,8 @@ Deprecations
 
 Bug fixes
 ~~~~~~~~~
+- Improved static typing of reduction methods (:pull:`6746`).
+  By `Richard Kleijn `_.
 - Fix bug where empty attrs would generate inconsistent tokens (:issue:`6970`, :pull:`8101`).
   By `Mattia Almansi `_.
 - Improved handling of multi-coordinate indexes when updating coordinates, including bug fixes
@@ -71,8 +81,8 @@ Bug fixes
   :pull:`8104`).
   By `Benoît Bovy `_.
 - Fix bug where :py:class:`DataArray` instances on the right-hand side
-  of :py:meth:`DataArray.__setitem__` lose dimension names.
-  (:issue:`7030`, :pull:`8067`) By `Darsh Ranjan `_.
+  of :py:meth:`DataArray.__setitem__` lose dimension names (:issue:`7030`, :pull:`8067`).
+  By `Darsh Ranjan `_.
 - Return ``float64`` in presence of ``NaT`` in :py:class:`~core.accessor_dt.DatetimeAccessor` and
   special case ``NaT`` handling in :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar`
   (:issue:`7928`, :pull:`8084`).
@@ -83,14 +93,13 @@ Bug fixes
 - Calling plot with kwargs ``col``, ``row`` or ``hue`` no longer squeezes dimensions passed
   via these arguments (:issue:`7552`, :pull:`8174`).
   By `Wiktor Kraśnicki `_.
-- Fixed a bug where casting from ``float`` to ``int64`` (undefined for ``NaN``) led to varying
-  issues (:issue:`7817`, :issue:`7942`, :issue:`7790`, :issue:`6191`, :issue:`7096`,
+- Fixed a bug where casting from ``float`` to ``int64`` (undefined for ``NaN``) led to varying issues (:issue:`7817`, :issue:`7942`, :issue:`7790`, :issue:`6191`, :issue:`7096`,
   :issue:`1064`, :pull:`7827`).
   By `Kai Mühlbauer `_.
 - Fixed a bug where inaccurate ``coordinates`` silently failed to decode variable (:issue:`1809`, :pull:`8195`).
   By `Kai Mühlbauer `_.
 - ``.rolling_exp`` functions no longer mistakenly lose non-dimensioned coords
-  (:issue:`6528`, :pull:`8114`)
+  (:issue:`6528`, :pull:`8114`).
   By `Maximilian Roos `_.
 - In the event that user-provided datetime64/timedelta64 units and integer dtype encoding parameters
   conflict with each other, override the units to preserve an integer dtype for most faithful serialization to disk (:issue:`1064`, :pull:`8201`).
   By `Kai Mühlbauer `_.
@@ -101,6 +110,8 @@ Bug fixes
 
 Documentation
 ~~~~~~~~~~~~~
+- Make documentation of :py:meth:`DataArray.where` clearer (:issue:`7767`, :pull:`7955`).
+  By `Riulinchen `_.
 
 Internal Changes
 ~~~~~~~~~~~~~~~~
@@ -116,7 +127,6 @@ Internal Changes
 - Test range of fill_value's in test_interpolate_pd_compat (:issue:`8146`, :pull:`8189`).
   By `Kai Mühlbauer `_.
 
-
 .. _whats-new.2023.08.0:
 
 v2023.08.0 (Aug 18, 2023)
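The ``on_missing_core_dims`` option highlighted in the release notes above can
be exercised as below; a sketch with an invented dataset, assuming the
copy/drop semantics described in the :pull:`8138` entry:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {
            "temperature": (("station", "time"), np.random.rand(3, 4)),
            # This variable lacks the "time" core dimension.
            "station_name": ("station", ["a", "b", "c"]),
        }
    )

    # Reduce over the core dimension; variables missing it are copied through
    # unchanged instead of raising an error.
    result = xr.apply_ufunc(
        lambda arr: arr.mean(axis=-1),
        ds,
        input_core_dims=[["time"]],
        on_missing_core_dims="copy",
    )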
From 84f5a0d2eef69cd2ee127c138e61066637d9f6ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kai=20M=C3=BChlbauer?=
Date: Tue, 26 Sep 2023 17:24:49 +0200
Subject: [PATCH 17/17] [skip-ci] dev whats-new (#8232)

---
 doc/whats-new.rst | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 2b0d2e151c5..17744288aef 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -14,6 +14,35 @@ What's New
 
     np.random.seed(123456)
 
+.. _whats-new.2023.09.1:
+
+v2023.09.1 (unreleased)
+-----------------------
+
+New Features
+~~~~~~~~~~~~
+
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+
+Deprecations
+~~~~~~~~~~~~
+
+
+Bug fixes
+~~~~~~~~~
+
+
+Documentation
+~~~~~~~~~~~~~
+
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+
+
 .. _whats-new.2023.09.0:
 
 v2023.09.0 (Sep 26, 2023)