
[pre-commit.ci] pre-commit autoupdate (#1926)
* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.8.6 → v0.9.4](astral-sh/ruff-pre-commit@v0.8.6...v0.9.4)
- [github.com/codespell-project/codespell: v2.3.0 → v2.4.1](codespell-project/codespell@v2.3.0...v2.4.1)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fix mkdocs

* fix precommit

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Edoardo Abati <[email protected]>
pre-commit-ci[bot] and EdAbati authored Feb 3, 2025
1 parent a770707 commit d63da25
Showing 24 changed files with 132 additions and 78 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -3,7 +3,7 @@ ci:
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.8.6'
+    rev: 'v0.9.4'
     hooks:
       # Run the formatter.
       - id: ruff-format
@@ -20,7 +20,7 @@ repos:
         additional_dependencies: ['polars==1.4.1', 'pytest==8.3.2']
         files: ^(narwhals|tests)/
   - repo: https://github.com/codespell-project/codespell
-    rev: 'v2.3.0'
+    rev: 'v2.4.1'
     hooks:
       - id: codespell
         files: \.(py|rst|md)$
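
These `rev` bumps are exactly what pre-commit's autoupdate service commits. To reproduce locally, the CLI performs the same rewrite of `.pre-commit-config.yaml` (a hedged sketch; check `pre-commit autoupdate --help` for your installed version):

    # Update every hook repo to its latest tagged release
    pre-commit autoupdate

    # Or restrict the bump to one hook repository
    pre-commit autoupdate --repo https://github.com/astral-sh/ruff-pre-commit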
5 changes: 2 additions & 3 deletions mkdocs.yml
@@ -105,11 +105,10 @@ plugins:
   - mkdocstrings:
       handlers:
         python:
-          import:
+          inventories:
             - https://installer.readthedocs.io/en/stable/objects.inv
-          rendering:
-            show_signature_annotations: true
           options:
+            show_signature_annotations: true
             members_order: alphabetical
       enable_inventory: true
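
The "fix mkdocs" step tracks a mkdocstrings rename: the Sphinx-inventory key `import` became `inventories`, and rendering settings moved under `options`. As a hedged sketch of the resulting handler configuration (key names as in recent mkdocstrings releases; the surrounding plugin layout is assumed, not copied from the repo):

    plugins:
      - mkdocstrings:
          handlers:
            python:
              inventories:
                - https://installer.readthedocs.io/en/stable/objects.inv
              options:
                show_signature_annotations: true
                members_order: alphabetical
          enable_inventory: true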
6 changes: 1 addition & 5 deletions narwhals/_arrow/expr.py
@@ -56,11 +56,7 @@ def __init__(
         self._kwargs = kwargs

     def __repr__(self: Self) -> str:  # pragma: no cover
-        return (
-            f"ArrowExpr("
-            f"depth={self._depth}, "
-            f"function_name={self._function_name}, "
-        )
+        return f"ArrowExpr(depth={self._depth}, function_name={self._function_name}, "

     def __call__(self: Self, df: ArrowDataFrame) -> Sequence[ArrowSeries]:
         return self._call(df)
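
Most of the Python hunks below are mechanical: Ruff 0.9 adopts the 2025 style guide, which joins implicitly concatenated string literals when the result fits on one line. A minimal, runnable illustration (a standalone snippet, not narwhals code) showing the old and new spellings build the same string:

    # Adjacent f-string literals are implicitly concatenated by Python...
    depth, function_name = 1, "col"
    old_style = (
        f"ArrowExpr("
        f"depth={depth}, "
        f"function_name={function_name}, "
    )
    # ...so the single merged f-string Ruff now emits is equivalent.
    new_style = f"ArrowExpr(depth={depth}, function_name={function_name}, "
    assert old_style == new_style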
6 changes: 1 addition & 5 deletions narwhals/_arrow/selectors.py
@@ -111,11 +111,7 @@ def func(df: ArrowDataFrame) -> list[ArrowSeries]:

 class ArrowSelector(ArrowExpr):
     def __repr__(self: Self) -> str:  # pragma: no cover
-        return (
-            f"ArrowSelector("
-            f"depth={self._depth}, "
-            f"function_name={self._function_name})"
-        )
+        return f"ArrowSelector(depth={self._depth}, function_name={self._function_name})"

     def _to_expr(self: Self) -> ArrowExpr:
         return ArrowExpr(
2 changes: 1 addition & 1 deletion narwhals/_dask/expr_dt.py
@@ -110,7 +110,7 @@ def weekday(self: Self) -> DaskExpr:

     def to_string(self: Self, format: str) -> DaskExpr:  # noqa: A002
         return self._compliant_expr._from_call(
-            lambda _input, format: _input.dt.strftime(format.replace("%.f", ".%f")),
+            lambda _input, format: _input.dt.strftime(format.replace("%.f", ".%f")),  # noqa: A006
             "strftime",
             format=format,
             returns_scalar=self._compliant_expr._returns_scalar,
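
The added `# noqa: A006` comments respond to a flake8-builtins change picked up with the newer Ruff: a lambda parameter that shadows a builtin (here `format`) is now reported under its own rule, A006, rather than the general argument rule A002 used on the enclosing method. A hedged minimal reproduction (not narwhals code):

    import datetime as dt

    # `format` shadows the builtin of the same name; with flake8-builtins
    # enabled, recent Ruff flags the lambda parameter as A006.
    to_string = lambda value, format: value.strftime(format)  # noqa: A006, E731

    print(to_string(dt.datetime(2025, 2, 3), "%Y-%m-%d"))  # 2025-02-03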
2 changes: 1 addition & 1 deletion narwhals/_dask/expr_str.py
@@ -96,7 +96,7 @@ def slice(self: Self, offset: int, length: int | None) -> DaskExpr:

     def to_datetime(self: Self, format: str | None) -> DaskExpr:  # noqa: A002
         return self._compliant_expr._from_call(
-            lambda _input, format: dd.to_datetime(_input, format=format),
+            lambda _input, format: dd.to_datetime(_input, format=format),  # noqa: A006
             "to_datetime",
             format=format,
             returns_scalar=self._compliant_expr._returns_scalar,
6 changes: 1 addition & 5 deletions narwhals/_dask/selectors.py
@@ -121,11 +121,7 @@ def func(df: DaskLazyFrame) -> list[dx.Series]:

 class DaskSelector(DaskExpr):
     def __repr__(self: Self) -> str:  # pragma: no cover
-        return (
-            f"DaskSelector("
-            f"depth={self._depth}, "
-            f"function_name={self._function_name})"
-        )
+        return f"DaskSelector(depth={self._depth}, function_name={self._function_name})"

     def _to_expr(self: Self) -> DaskExpr:
         return DaskExpr(
2 changes: 1 addition & 1 deletion narwhals/_duckdb/selectors.py
@@ -114,7 +114,7 @@ def func(df: DuckDBLazyFrame) -> list[duckdb.Expression]:

 class DuckDBSelector(DuckDBExpr):
     def __repr__(self: Self) -> str:  # pragma: no cover
-        return f"DuckDBSelector(" f"function_name={self._function_name})"
+        return f"DuckDBSelector(function_name={self._function_name})"

     def _to_expr(self: Self) -> DuckDBExpr:
         return DuckDBExpr(
5 changes: 1 addition & 4 deletions narwhals/_pandas_like/expr.py
@@ -75,10 +75,7 @@ def __call__(self: Self, df: PandasLikeDataFrame) -> Sequence[PandasLikeSeries]:

     def __repr__(self) -> str:  # pragma: no cover
         return (
-            f"PandasLikeExpr("
-            f"depth={self._depth}, "
-            f"function_name={self._function_name}, "
-            ")"
+            f"PandasLikeExpr(depth={self._depth}, function_name={self._function_name}, )"
         )

     def __narwhals_namespace__(self: Self) -> PandasLikeNamespace:
4 changes: 1 addition & 3 deletions narwhals/_pandas_like/selectors.py
@@ -120,9 +120,7 @@ def func(df: PandasLikeDataFrame) -> list[PandasLikeSeries]:

 class PandasSelector(PandasLikeExpr):
     def __repr__(self) -> str:  # pragma: no cover
         return (
-            f"PandasSelector("
-            f"depth={self._depth}, "
-            f"function_name={self._function_name}, "
+            f"PandasSelector(depth={self._depth}, function_name={self._function_name}, "
         )

     def _to_expr(self: Self) -> PandasLikeExpr:
2 changes: 1 addition & 1 deletion narwhals/_spark_like/selectors.py
@@ -117,7 +117,7 @@ def func(df: SparkLikeLazyFrame) -> list[Column]:

 class SparkLikeSelector(SparkLikeExpr):
     def __repr__(self: Self) -> str:  # pragma: no cover
-        return f"SparkLikeSelector(" f"function_name={self._function_name})"
+        return f"SparkLikeSelector(function_name={self._function_name})"

     def _to_expr(self: Self) -> SparkLikeExpr:
         return SparkLikeExpr(
51 changes: 40 additions & 11 deletions narwhals/dataframe.py
@@ -1699,7 +1699,9 @@ def iter_rows(
         We define a library agnostic function:

         >>> def agnostic_iter_rows(df_native: IntoDataFrame, *, named: bool):
-        ...     return nw.from_native(df_native, eager_only=True).iter_rows(named=named)
+        ...     return nw.from_native(df_native, eager_only=True).iter_rows(
+        ...         named=named
+        ...     )

         We can then pass any supported library such as Pandas, Polars, or PyArrow
         to `agnostic_iter_rows`:
@@ -2376,7 +2378,9 @@ def filter(
         >>> def agnostic_filter(df_native: IntoFrameT) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
-        ...     return df.filter((nw.col("foo") < 3) & (nw.col("ham") == "a")).to_native()
+        ...     return df.filter(
+        ...         (nw.col("foo") < 3) & (nw.col("ham") == "a")
+        ...     ).to_native()

         >>> agnostic_filter(df_pd)
            foo  bar ham
         0    1    6   a
@@ -2555,7 +2559,12 @@ def group_by(
         >>> def agnostic_group_by_agg(df_native: IntoDataFrameT) -> IntoDataFrameT:
         ...     df = nw.from_native(df_native, eager_only=True)
-        ...     return df.group_by(["a", "b"]).agg(nw.max("c")).sort("a", "b").to_native()
+        ...     return (
+        ...         df.group_by(["a", "b"])
+        ...         .agg(nw.max("c"))
+        ...         .sort("a", "b")
+        ...         .to_native()
+        ...     )

         >>> agnostic_group_by_agg(df_pd)
            a   b  c
@@ -2850,7 +2859,9 @@ def join_asof(
         ... ) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
         ...     other = nw.from_native(other_native)
-        ...     return df.join_asof(other, on="datetime", strategy=strategy).to_native()
+        ...     return df.join_asof(
+        ...         other, on="datetime", strategy=strategy
+        ...     ).to_native()

         We can then pass any supported library such as Pandas or Polars
         to `agnostic_join_asof_datetime`:
@@ -3055,14 +3066,22 @@ def is_empty(self: Self) -> bool:
         >>> df_pd = pd.DataFrame(data)
         >>> df_pl = pl.DataFrame(data)
         >>> df_pa = pa.table(data)
-        >>> agnostic_is_empty(df_pd), agnostic_is_empty(df_pl), agnostic_is_empty(df_pa)
+        >>> (
+        ...     agnostic_is_empty(df_pd),
+        ...     agnostic_is_empty(df_pl),
+        ...     agnostic_is_empty(df_pa),
+        ... )
         (True, True, True)

         >>> data = {"foo": [100, 2, 3], "bar": [4, 5, 6]}
         >>> df_pd = pd.DataFrame(data)
         >>> df_pl = pl.DataFrame(data)
         >>> df_pa = pa.table(data)
-        >>> agnostic_is_empty(df_pd), agnostic_is_empty(df_pl), agnostic_is_empty(df_pa)
+        >>> (
+        ...     agnostic_is_empty(df_pd),
+        ...     agnostic_is_empty(df_pl),
+        ...     agnostic_is_empty(df_pa),
+        ... )
         (False, False, False)
         """
         return self._compliant_frame.is_empty()  # type: ignore[no-any-return]
@@ -3395,7 +3414,9 @@ def pivot(
         >>> def agnostic_pivot(df_native: IntoDataFrameT) -> IntoDataFrameT:
         ...     df = nw.from_native(df_native, eager_only=True)
-        ...     return df.pivot("col", index="ix", aggregate_function="sum").to_native()
+        ...     return df.pivot(
+        ...         "col", index="ix", aggregate_function="sum"
+        ...     ).to_native()

         We can then pass any supported library such as Pandas or Polars
         to `agnostic_pivot`:
@@ -4306,7 +4327,9 @@ def with_columns(
         >>> def agnostic_with_columns(df_native: IntoFrameT) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
         ...     return (
-        ...         df.with_columns((nw.col("a") * 2).alias("2a")).collect().to_native()
+        ...         df.with_columns((nw.col("a") * 2).alias("2a"))
+        ...         .collect()
+        ...         .to_native()
         ...     )

         We can then pass any supported library such as Polars or Dask to `agnostic_with_columns`:
@@ -4422,7 +4445,9 @@ def select(
         >>> def agnostic_select(df_native: IntoFrameT) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
-        ...     return df.select(nw.col("foo"), nw.col("bar") + 1).collect().to_native()
+        ...     return (
+        ...         df.select(nw.col("foo"), nw.col("bar") + 1).collect().to_native()
+        ...     )

         >>> agnostic_select(lf_pl)
         shape: (3, 2)
@@ -5047,7 +5072,9 @@ def sort(
         >>> def agnostic_sort(df_native: IntoFrameT) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
-        ...     return df.sort("c", "a", descending=[False, True]).collect().to_native()
+        ...     return (
+        ...         df.sort("c", "a", descending=[False, True]).collect().to_native()
+        ...     )

         We can then pass any supported library such as Polars or Dask to `agnostic_sort`:
@@ -5262,7 +5289,9 @@ def join_asof(
         │ 2018-08-01 00:00:00 ┆ 82.66      ┆ 4566 │
         │ 2019-01-01 00:00:00 ┆ 83.12      ┆ 4696 │
         └─────────────────────┴────────────┴──────┘
-        >>> agnostic_join_asof_datetime(population_dask, gdp_dask, strategy="backward")
+        >>> agnostic_join_asof_datetime(
+        ...     population_dask, gdp_dask, strategy="backward"
+        ... )
           datetime  population   gdp
         0 2016-03-01       82.19  4164
         1 2018-08-01       82.66  4566
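
The docstring hunks above (and in the files below) are doctest examples reflowed by the formatter rather than hand edits; Ruff only touches these when docstring code formatting is switched on. A hedged sketch of the relevant settings, with option names from Ruff's formatter documentation (narwhals' actual configuration may differ):

    # pyproject.toml
    [tool.ruff.format]
    docstring-code-format = true            # also reformat code in docstrings/doctests
    docstring-code-line-length = "dynamic"  # wrap doctest code to fit its indentation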
4 changes: 3 additions & 1 deletion narwhals/dtypes.py
@@ -472,7 +472,9 @@ class Datetime(TemporalType):
         ...     .astype("datetime64[ms, Africa/Accra]")
         ... )
         >>> ser_pl = (
-        ...     pl.Series(data).cast(pl.Datetime("ms")).dt.replace_time_zone("Africa/Accra")
+        ...     pl.Series(data)
+        ...     .cast(pl.Datetime("ms"))
+        ...     .dt.replace_time_zone("Africa/Accra")
         ... )
         >>> ser_pa = pc.assume_timezone(
         ...     pa.chunked_array([data], type=pa.timestamp("ms")), "Africa/Accra"
20 changes: 15 additions & 5 deletions narwhals/expr.py
@@ -1437,7 +1437,9 @@ def is_nan(self: Self) -> Self:
         ...     "SELECT * FROM VALUES (null, CAST('NaN' AS DOUBLE)), (2, 2.) df(a, b)"
         ... )
         >>> df = nw.from_native(df_native)
-        >>> df.with_columns(a_is_nan=nw.col("a").is_nan(), b_is_nan=nw.col("b").is_nan())
+        >>> df.with_columns(
+        ...     a_is_nan=nw.col("a").is_nan(), b_is_nan=nw.col("b").is_nan()
+        ... )
         ┌────────────────────────────────────────┐
         | Narwhals LazyFrame                     |
         |----------------------------------------|
@@ -1507,7 +1509,9 @@ def fill_null(
         ...     }
         ... )
         >>> df = nw.from_native(df_native)
-        >>> df.with_columns(nw.col("a", "b").fill_null(0).name.suffix("_nulls_filled"))
+        >>> df.with_columns(
+        ...     nw.col("a", "b").fill_null(0).name.suffix("_nulls_filled")
+        ... )
         ┌────────────────────────────────────────────────┐
         | Narwhals DataFrame                             |
         |------------------------------------------------|
@@ -1771,7 +1775,9 @@ def null_count(self: Self) -> Self:
     Examples:
         >>> import pandas as pd
         >>> import narwhals as nw
-        >>> df_native = pd.DataFrame({"a": [1, 2, None, 1], "b": ["a", None, "b", None]})
+        >>> df_native = pd.DataFrame(
+        ...     {"a": [1, 2, None, 1], "b": ["a", None, "b", None]}
+        ... )
         >>> df = nw.from_native(df_native)
         >>> df.select(nw.all().null_count())
         ┌──────────────────┐
@@ -1830,7 +1836,9 @@ def is_last_distinct(self: Self) -> Self:
         >>> import narwhals as nw
         >>> df_native = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["a", "a", "b", "c"]})
         >>> df = nw.from_native(df_native)
-        >>> df.with_columns(nw.all().is_last_distinct().name.suffix("_is_last_distinct"))
+        >>> df.with_columns(
+        ...     nw.all().is_last_distinct().name.suffix("_is_last_distinct")
+        ... )
         ┌───────────────────────────────────────────────┐
         | Narwhals DataFrame                            |
         |-----------------------------------------------|
@@ -1872,7 +1880,9 @@ def quantile(
     Examples:
         >>> import pandas as pd
         >>> import narwhals as nw
-        >>> df_native = pd.DataFrame({"a": list(range(50)), "b": list(range(50, 100))})
+        >>> df_native = pd.DataFrame(
+        ...     {"a": list(range(50)), "b": list(range(50, 100))}
+        ... )
         >>> df = nw.from_native(df_native)
         >>> df.select(nw.col("a", "b").quantile(0.5, interpolation="linear"))
         ┌──────────────────┐
8 changes: 6 additions & 2 deletions narwhals/expr_dt.py
@@ -34,7 +34,9 @@ def date(self: Self) -> ExprT:
         >>> import narwhals as nw
         >>> from narwhals.typing import IntoFrameT
         >>>
-        >>> data = {"a": [datetime(2012, 1, 7, 10, 20), datetime(2023, 3, 10, 11, 32)]}
+        >>> data = {
+        ...     "a": [datetime(2012, 1, 7, 10, 20), datetime(2023, 3, 10, 11, 32)]
+        ... }
         >>> df_pd = pd.DataFrame(data).convert_dtypes(dtype_backend="pyarrow")
         >>> df_pl = pl.DataFrame(data)
         >>> df_pa = pa.table(data)
@@ -913,7 +915,9 @@ def total_seconds(self: Self) -> ExprT:
         >>> import narwhals as nw
         >>> from narwhals.typing import IntoFrameT
         >>>
-        >>> data = {"a": [timedelta(seconds=10), timedelta(seconds=20, milliseconds=40)]}
+        >>> data = {
+        ...     "a": [timedelta(seconds=10), timedelta(seconds=20, milliseconds=40)]
+        ... }
         >>> df_pd = pd.DataFrame(data)
         >>> df_pl = pl.DataFrame(data)
         >>> df_pa = pa.table(data)
4 changes: 3 additions & 1 deletion narwhals/expr_name.py
@@ -44,7 +44,9 @@ def keep(self: Self) -> ExprT:
         >>> def agnostic_name_keep(df_native: IntoFrame) -> list[str]:
         ...     df = nw.from_native(df_native)
-        ...     return df.select(nw.col("foo").alias("alias_for_foo").name.keep()).columns
+        ...     return df.select(
+        ...         nw.col("foo").alias("alias_for_foo").name.keep()
+        ...     ).columns

         We can then pass any supported library such as pandas, Polars, or
         PyArrow to `agnostic_name_keep`:
8 changes: 6 additions & 2 deletions narwhals/expr_str.py
@@ -180,7 +180,9 @@ def replace_all(
         >>> def agnostic_str_replace_all(df_native: IntoFrameT) -> IntoFrameT:
         ...     df = nw.from_native(df_native)
-        ...     df = df.with_columns(replaced=nw.col("foo").str.replace_all("abc", ""))
+        ...     df = df.with_columns(
+        ...         replaced=nw.col("foo").str.replace_all("abc", "")
+        ...     )
         ...     return df.to_native()

         We can then pass any supported library such as pandas, Polars, or
@@ -429,7 +431,9 @@ def contains(self: Self, pattern: str, *, literal: bool = False) -> ExprT:
         ...     df = nw.from_native(df_native)
         ...     return df.with_columns(
         ...         default_match=nw.col("pets").str.contains("parrot|Dove"),
-        ...         case_insensitive_match=nw.col("pets").str.contains("(?i)parrot|Dove"),
+        ...         case_insensitive_match=nw.col("pets").str.contains(
+        ...             "(?i)parrot|Dove"
+        ...         ),
         ...         literal_match=nw.col("pets").str.contains(
         ...             "parrot|Dove", literal=True
         ...         ),
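
The `contains` doctest above only defines the expressions; a hedged, self-contained version using pandas as the native backend (column values invented for illustration) would be:

    import pandas as pd
    import narwhals as nw

    df = nw.from_native(pd.DataFrame({"pets": ["cat", "Parrot", "dove"]}))
    out = df.with_columns(
        default_match=nw.col("pets").str.contains("parrot|Dove"),
        case_insensitive_match=nw.col("pets").str.contains("(?i)parrot|Dove"),
        # literal=True disables regex, so "parrot|Dove" must appear verbatim
        literal_match=nw.col("pets").str.contains("parrot|Dove", literal=True),
    )
    print(out.to_native())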