diff --git a/py-polars/polars/convert/normalize.py b/py-polars/polars/convert/normalize.py
index d6e24ddc8af5..a92108d930bb 100644
--- a/py-polars/polars/convert/normalize.py
+++ b/py-polars/polars/convert/normalize.py
@@ -16,10 +16,6 @@
 from polars.schema import Schema
 
 
-def _remove_prefix(text: str, prefix: str) -> str:
-    return text.removeprefix(prefix)
-
-
 def _simple_json_normalize(
     data: dict[Any, Any] | Sequence[dict[Any, Any] | Any],
     separator: str,
@@ -206,7 +202,7 @@ def normalize_json(
                 new_key = f"{key_string}{separator}{key}"
 
                 if not key_string:
-                    new_key = _remove_prefix(new_key, separator)
+                    new_key = new_key.removeprefix(separator)
 
                 normalize_json(
                     data=value,
diff --git a/py-polars/polars/dataframe/frame.py b/py-polars/polars/dataframe/frame.py
index d1f39c3b135a..eca65a22c8c8 100644
--- a/py-polars/polars/dataframe/frame.py
+++ b/py-polars/polars/dataframe/frame.py
@@ -8506,9 +8506,7 @@ def unstack(
             n_cols = step
             n_rows = math.ceil(height / n_cols)
 
-        n_fill = n_cols * n_rows - height
-
-        if n_fill:
+        if n_fill := n_cols * n_rows - height:
             if not isinstance(fill_values, list):
                 fill_values = [fill_values for _ in range(df.width)]
 
diff --git a/py-polars/polars/io/csv/batched_reader.py b/py-polars/polars/io/csv/batched_reader.py
index 35073a736f79..e0384f03dde2 100644
--- a/py-polars/polars/io/csv/batched_reader.py
+++ b/py-polars/polars/io/csv/batched_reader.py
@@ -130,8 +130,7 @@ def next_batches(self, n: int) -> list[DataFrame] | None:
         -------
         list of DataFrames
         """
-        batches = self._reader.next_batches(n)
-        if batches is not None:
+        if (batches := self._reader.next_batches(n)) is not None:
             if self.new_columns:
                 return [
                     _update_columns(wrap_df(df), self.new_columns) for df in batches
diff --git a/py-polars/polars/io/delta.py b/py-polars/polars/io/delta.py
index 9cb621fc5534..9b0219fa8dff 100644
--- a/py-polars/polars/io/delta.py
+++ b/py-polars/polars/io/delta.py
@@ -347,8 +347,7 @@ def _check_for_unsupported_types(dtypes: list[DataType]) -> None:
     # Note that this overlap check does NOT work correctly for Categorical, so
     # if Categorical is added back to unsupported_types a different check will
     # need to be used.
-    overlap = schema_dtypes & unsupported_types
-    if overlap:
+    if overlap := schema_dtypes & unsupported_types:
         msg = f"dataframe contains unsupported data types: {overlap!r}"
         raise TypeError(msg)
 
diff --git a/py-polars/polars/lazyframe/in_process.py b/py-polars/polars/lazyframe/in_process.py
index e2969c2b1baf..f4b8ae7fcad1 100644
--- a/py-polars/polars/lazyframe/in_process.py
+++ b/py-polars/polars/lazyframe/in_process.py
@@ -32,8 +32,7 @@ def fetch(self) -> DataFrame | None:
         If it is ready, a materialized DataFrame is returned.
         If it is not ready it will return `None`.
         """
-        out = self._inner.fetch()
-        if out is not None:
+        if (out := self._inner.fetch()) is not None:
             return wrap_df(out)
         else:
             return None
diff --git a/py-polars/polars/series/series.py b/py-polars/polars/series/series.py
index b5a02c8756c2..f57f976df70a 100644
--- a/py-polars/polars/series/series.py
+++ b/py-polars/polars/series/series.py
@@ -1225,7 +1225,7 @@ def __contains__(self, item: Any) -> bool:
             return self.has_nulls()
         return self.implode().list.contains(item).item()
 
-    def __iter__(self) -> Generator[Any, None, None]:
+    def __iter__(self) -> Generator[Any]:
         if self.dtype in (List, Array):
             # TODO: either make a change and return py-native list data here, or find
             # a faster way to return nested/List series; sequential 'get_index' calls
diff --git a/py-polars/polars/series/utils.py b/py-polars/polars/series/utils.py
index 7668ad8143b1..65fd311fbbcc 100644
--- a/py-polars/polars/series/utils.py
+++ b/py-polars/polars/series/utils.py
@@ -100,8 +100,7 @@ def call_expr(func: SeriesMethod) -> SeriesMethod:
     def wrapper(self: Any, *args: P.args, **kwargs: P.kwargs) -> Series:
         s = wrap_s(self._s)
         expr = F.col(s.name)
-        namespace = getattr(self, "_accessor", None)
-        if namespace is not None:
+        if (namespace := getattr(self, "_accessor", None)) is not None:
             expr = getattr(expr, namespace)
         f = getattr(expr, func.__name__)
         return s.to_frame().select_seq(f(*args, **kwargs)).to_series()