🧹 refurb cleanup + one typo #152

Merged · 3 commits · Dec 27, 2022
7 changes: 4 additions & 3 deletions plotly_resampler/__init__.py
@@ -1,5 +1,7 @@
"""**plotly\_resampler**: visualizing large sequences."""

import contextlib

from .aggregation import LTTB, EfficientLTTB, EveryNthPoint
from .figure_resampler import FigureResampler, FigureWidgetResampler
from .registering import register_plotly_resampler, unregister_plotly_resampler
@@ -20,12 +22,11 @@
]


try: # Enable ipywidgets on google colab!
# Enable ipywidgets on google colab!
with contextlib.suppress(ImportError, ModuleNotFoundError):
import sys

if "google.colab" in sys.modules:
from google.colab import output

output.enable_custom_widget_manager()
except (ImportError, ModuleNotFoundError):
pass
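
Side note on the pattern above: `contextlib.suppress` is the refurb-preferred replacement for a try/except block whose only handler is `pass`. A minimal, self-contained sketch (the module name below is hypothetical, purely for illustration):

```python
import contextlib

# Classic form: swallow a failed optional import with a bare except/pass.
try:
    import some_optional_dependency  # hypothetical module, not a real requirement
except (ImportError, ModuleNotFoundError):
    pass

# refurb-preferred form: contextlib.suppress states the intent in one line.
# (ModuleNotFoundError subclasses ImportError, so listing both is redundant
# but harmless.)
with contextlib.suppress(ImportError, ModuleNotFoundError):
    import some_optional_dependency  # hypothetical module, not a real requirement
```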
4 changes: 2 additions & 2 deletions plotly_resampler/aggregation/aggregators.py
@@ -76,7 +76,7 @@ def __init__(self, interleave_gaps: bool = True, nan_position="end"):
super().__init__(
interleave_gaps,
nan_position,
dtype_regex_list=[rf"{dtype}\d*" for dtype in ["float", "int", "uint"]]
dtype_regex_list=[rf"{dtype}\d*" for dtype in ("float", "int", "uint")]
+ ["category", "bool"],
)

@@ -278,7 +278,7 @@ def __init__(self, interleave_gaps: bool = True, nan_position="end"):
super().__init__(
interleave_gaps,
nan_position,
dtype_regex_list=[rf"{dtype}\d*" for dtype in ["float", "int", "uint"]]
dtype_regex_list=[rf"{dtype}\d*" for dtype in ("float", "int", "uint")]
+ ["category", "bool"],
)

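For readers unfamiliar with this refurb rule: iterating a comprehension over a tuple literal instead of a list literal avoids allocating a throwaway list and signals that the sequence is fixed. A quick sketch using the dtype names from the diff (the surrounding variable is illustrative only):

```python
# Build the same regex list as above; only the literal type of the iterable changes.
dtype_regex_list = [rf"{dtype}\d*" for dtype in ("float", "int", "uint")] + [
    "category",
    "bool",
]
print(dtype_regex_list)
# ['float\\d*', 'int\\d*', 'uint\\d*', 'category', 'bool']
```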
2 changes: 1 addition & 1 deletion plotly_resampler/aggregation/algorithms/lttb_c.py
@@ -35,7 +35,7 @@ def downsample(x: np.ndarray, y: np.ndarray, n_out: int) -> np.ndarray:
"""
if x.dtype == np.int64 and y.dtype == np.float64:
return downsample_int_double(x, y, n_out)
elif x.dtype == np.int64 and y.dtype == np.int64:
elif x.dtype == y.dtype == np.int64:
return downsample_int_int(x, y, n_out)
elif x.dtype == np.int64 and y.dtype == np.float32:
return downsample_int_float(x, y, n_out)
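The change above relies on Python's comparison chaining: `a == b == c` evaluates as `(a == b) and (b == c)`. A tiny sketch with made-up arrays:

```python
import numpy as np

x = np.arange(5, dtype=np.int64)
y = np.arange(5, dtype=np.int64)

# Equivalent to: x.dtype == np.int64 and y.dtype == np.int64
assert x.dtype == y.dtype == np.int64
```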
2 changes: 1 addition & 1 deletion plotly_resampler/aggregation/algorithms/lttbc.c
@@ -156,7 +156,7 @@ static PyObject *downsample_int_double(PyObject *self, PyObject *args)
}

// This method only returns the index positions of the selected points.
// almost everything can be sucessfully parsed to this so this will be
// almost everything can be successfully parsed to this so this will be
// our fallback method
static PyObject *downsample_double_double(PyObject *self, PyObject *args)
{
4 changes: 2 additions & 2 deletions plotly_resampler/figure_resampler/figure_resampler.py
@@ -377,7 +377,7 @@ def show_dash(
mode is None or mode in available_modes
), f"mode must be one of {available_modes}"
graph_properties = {} if graph_properties is None else graph_properties
assert "config" not in graph_properties.keys() # There is a param for config
assert "config" not in graph_properties # There is a param for config

# 0. Check if the traces need to be updated when there is a xrange set
# This will be the case when the users has set a xrange (via the `update_layout`
@@ -388,7 +388,7 @@
if x_range: # when not None
relayout_dict[f"{xaxis_str}.range[0]"] = x_range[0]
relayout_dict[f"{xaxis_str}.range[1]"] = x_range[1]
if len(relayout_dict):
if relayout_dict: # when not empty
update_data = self.construct_update_data(relayout_dict)

if not self._is_no_update(update_data): # when there is an update
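Both edits in `show_dash` lean on standard dict semantics: `key in some_dict` already tests the keys, and an empty dict is falsy. A minimal sketch with illustrative values (not the real defaults):

```python
graph_properties = {"responsive": True}  # illustrative value

# Membership tests on a dict check its keys, so .keys() is redundant.
assert "config" not in graph_properties

relayout_dict = {}  # nothing to relayout in this sketch

# An empty dict is falsy, so truthiness replaces the explicit len() call.
if relayout_dict:
    print("would construct update data")
else:
    print("no update needed")
```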
16 changes: 9 additions & 7 deletions plotly_resampler/figure_resampler/figure_resampler_interface.py
@@ -433,12 +433,12 @@ def _check_update_figure_dict(
if (
xaxis_filter_short == "x"
and (
x_anchor_trace not in [None, "x"]
x_anchor_trace not in (None, "x")
and xaxis_matches != xaxis_filter_short
)
) or (
xaxis_filter_short != "x"
and (xaxis_filter_short not in [x_anchor_trace, xaxis_matches])
and (xaxis_filter_short not in (x_anchor_trace, xaxis_matches))
):
continue

@@ -1260,7 +1260,7 @@ def construct_update_data(
# 1. Base case - there is an x-range specified in the front-end
start_matches = self._re_matches(re.compile(r"xaxis\d*.range\[0]"), cl_k)
stop_matches = self._re_matches(re.compile(r"xaxis\d*.range\[1]"), cl_k)
if len(start_matches) and len(stop_matches):
if start_matches and stop_matches: # when both are not empty
for t_start_key, t_stop_key in zip(start_matches, stop_matches):
# Check if the xaxis<NUMB> part of xaxis<NUMB>.[0-1] matches
xaxis = t_start_key.split(".")[0]
@@ -1280,7 +1280,7 @@
)
spike_matches = self._re_matches(re.compile(r"xaxis\d*.showspikes"), cl_k)
# 2.1 Reset-axes -> autorange & reset to the global data view
if len(autorange_matches) and len(spike_matches):
if autorange_matches and spike_matches: # when both are not empty
for autorange_key in autorange_matches:
if relayout_data[autorange_key]:
xaxis = autorange_key.split(".")[0]
@@ -1291,14 +1291,16 @@
)
# 2.1. Autorange -> do nothing, the autorange will be applied on the
# current front-end view
elif len(autorange_matches) and not len(spike_matches):
elif (
autorange_matches and not spike_matches
): # when only autorange is not empty
# PreventUpdate returns a 204 status code response on the
# relayout post request
return dash.no_update

# If we do not have any traces to be updated, we will return an empty
# request response
if len(updated_trace_indices) == 0:
if not updated_trace_indices: # when updated_trace_indices is empty
# PreventUpdate returns a 204 status-code response on the relayout post
# request
return dash.no_update
@@ -1336,7 +1338,7 @@ def _parse_dtype_orjson(series: np.ndarray) -> np.ndarray:
# * float16 and float128 aren't supported with latest orjson versions (3.8.1)
# * this method assumes that the it will not get a float128 series
# -> this method can be removed if orjson supports float16
if series.dtype in [np.float16]:
if series.dtype == np.float16:
return series.astype(np.float32)
return series

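For context on the last hunk: `_parse_dtype_orjson` upcasts float16 because orjson (3.8.1 at the time of this PR) cannot serialize float16 NumPy arrays. A standalone sketch of that workaround, assuming orjson is installed and using its `OPT_SERIALIZE_NUMPY` option:

```python
import numpy as np
import orjson

series = np.array([0.5, 1.5], dtype=np.float16)  # illustrative data

# Comparing against a single dtype directly replaces "dtype in [np.float16]".
if series.dtype == np.float16:
    series = series.astype(np.float32)  # float32 is serializable by orjson

payload = orjson.dumps(series, option=orjson.OPT_SERIALIZE_NUMPY)
print(payload)  # b"[0.5,1.5]"
```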
@@ -168,7 +168,7 @@ def _update_x_ranges(self, layout, *x_ranges, force_update: bool = False):
# -> save current xaxis range to _prev_layout
self._prev_layout[xaxis_str]["range"] = x_range

if len(relayout_dict):
if relayout_dict: # when not empty
# Construct the update data
update_data = self.construct_update_data(relayout_dict)

12 changes: 6 additions & 6 deletions plotly_resampler/figure_resampler/utils.py
Expand Up @@ -128,7 +128,7 @@ def timedelta_to_str(td: pd.Timedelta) -> str:
if c.days > 0:
out_str += f"{c.days}D"
if c.hours > 0 or c.minutes > 0 or c.seconds > 0 or c.milliseconds > 0:
out_str += "_" if len(out_str) else ""
out_str += "_" if out_str else "" # add seperator if non-empty

if c.hours > 0:
out_str += f"{c.hours}h"
@@ -142,11 +142,11 @@ def timedelta_to_str(td: pd.Timedelta) -> str:
else:
out_str += f"{c.seconds}s"
elif c.milliseconds > 0:
out_str += f"{str(c.milliseconds)}ms"
out_str += f"{c.milliseconds}ms"
if c.microseconds > 0:
out_str += f"{str(c.microseconds)}us"
out_str += f"{c.microseconds}us"
if c.nanoseconds > 0:
out_str += f"{str(c.nanoseconds)}ns"
out_str += f"{c.nanoseconds}ns"
return out_str
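
The dropped `str(...)` calls in this hunk are redundant because f-string interpolation already formats the value. A one-liner check:

```python
milliseconds = 42  # illustrative value

# Both interpolations yield the same string; the explicit str() adds nothing.
assert f"{str(milliseconds)}ms" == f"{milliseconds}ms" == "42ms"
```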


@@ -156,14 +156,14 @@ def round_td_str(td: pd.Timedelta) -> str:
.. seealso::
:func:`timedelta_to_str`
"""
for t_s in ["D", "H", "min", "s", "ms", "us", "ns"]:
for t_s in ("D", "H", "min", "s", "ms", "us", "ns"):
if td > 0.95 * pd.Timedelta(f"1{t_s}"):
return timedelta_to_str(td.round(t_s))


def round_number_str(number: float) -> str:
if number > 0.95:
for unit, scaling in [("M", int(1e6)), ("k", int(1e3))]:
for unit, scaling in (("M", int(1e6)), ("k", int(1e3))):
if number / scaling > 0.95:
return f"{round(number / scaling)}{unit}"
return str(round(number))
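To round things off, a hedged usage sketch of `round_number_str` based only on the body shown above; the import path is inferred from the file name in this diff and may differ in practice:

```python
from plotly_resampler.figure_resampler.utils import round_number_str  # inferred path

# Values are expressed in the largest unit that still covers ~95% of them.
print(round_number_str(1_200_000))  # "1M"
print(round_number_str(3_500))      # "4k"  (round() uses banker's rounding: 3.5 -> 4)
print(round_number_str(7))          # "7"
```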