From 0b0e56f53cbb90669760dc9694f221f0052252fd Mon Sep 17 00:00:00 2001
From: Bas Nijholt <bas@nijho.lt>
Date: Fri, 7 Apr 2023 00:02:53 -0700
Subject: [PATCH 1/3] Replace isort, flake8, and pyupgrade with ruff

---
 .pre-commit-config.yaml | 16 ++++------------
 pyproject.toml          | 33 +++++++++++++++++++++++++++------
 2 files changed, 31 insertions(+), 18 deletions(-)

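Notes: ruff consolidates the checks previously run by isort (import
sorting), flake8 (linting), and pyupgrade (syntax modernization) into a
single tool, configured under [tool.ruff] in pyproject.toml below. As a
rough illustration of what the selected rule families catch (a sketch
based on ruff's documented rule codes; none of these lines come from
this repository):

    import os                        # F401 (pyflakes): `os` imported but unused

    l = 10                           # E741 (pycodestyle): ambiguous variable name `l`

    def append_to(item, target=[]):  # B006 (bugbear): mutable default argument
        target.append(item)
        return target

    print("debug")                   # T201 (flake8-print): selected via "T" but
                                     # silenced here by the "T20" ignore
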
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index eccb0b4bd..7698bb4bd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,16 +12,8 @@ repos:
     rev: 23.1.0
     hooks:
       - id: black
-  - repo: https://github.com/asottile/pyupgrade
-    rev: v3.3.1
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    rev: "v0.0.259"
     hooks:
-      - id: pyupgrade
-        args: ["--py37-plus"]
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-  - repo: https://github.com/pycqa/flake8
-    rev: 6.0.0
-    hooks:
-      - id: flake8
+      - id: ruff
+        args: ["--fix"]
diff --git a/pyproject.toml b/pyproject.toml
index 6a589e845..933129f7d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,11 +29,11 @@ dependencies = [
 ]
 
 [project.optional-dependencies]
-other =[
+other = [
     "dill",
     "distributed",
-    "ipyparallel>=6.2.5",  # because of https://github.com/ipython/ipyparallel/issues/404
-    "scikit-optimize>=0.8.1",  # because of https://github.com/scikit-optimize/scikit-optimize/issues/931
+    "ipyparallel>=6.2.5",       # because of https://github.com/ipython/ipyparallel/issues/404
+    "scikit-optimize>=0.8.1",   # because of https://github.com/scikit-optimize/scikit-optimize/issues/931
     "scikit-learn",
     "wexpect; os_name == 'nt'",
     "pexpect; os_name != 'nt'",
@@ -91,8 +91,29 @@ precision = 2
 [tool.coverage.xml]
 output = ".coverage.xml"
 
-[tool.isort]
-profile = "black"
-
 [tool.mypy]
 ignore_missing_imports = true
+python_version = "3.7"
+
+[tool.ruff]
+line-length = 150
+target-version = "py37"
+select = ["B", "C", "E", "F", "W", "T", "B9"]
+ignore = [
+    "T20",     # flake8-print
+    "ANN101",  # Missing type annotation for {name} in method
+    "S101",    # Use of assert detected
+    "PD901",   # df is a bad variable name. Be kinder to your future self.
+    "ANN401",  # Dynamically typed expressions (typing.Any) are disallowed in {name}
+    "D402",    # First line should not be the function's signature
+    "PLW0603", # Using the global statement to update `X` is discouraged
+    "D401",    # First line of docstring should be in imperative mood
+]
+
+[tool.ruff.per-file-ignores]
+"tests/*" = ["SLF001"]
+"ci/*" = ["INP001"]
+"tests/test_examples.py" = ["E501"]
+
+[tool.ruff.mccabe]
+max-complexity = 18

From 98020cfed982bda224a69a7f58b11eb430739504 Mon Sep 17 00:00:00 2001
From: Bas Nijholt <bas@nijho.lt>
Date: Fri, 7 Apr 2023 00:10:38 -0700
Subject: [PATCH 2/3] Fix all ruff issues

---
 adaptive/_version.py                     |  2 +-
 adaptive/learner/average_learner1D.py    |  2 +-
 adaptive/learner/balancing_learner.py    | 46 +++++++------
 adaptive/learner/integrator_coeffs.py    |  2 +-
 adaptive/learner/integrator_learner.py   |  6 +-
 adaptive/learner/learner1D.py            |  2 +-
 adaptive/learner/learner2D.py            | 11 +--
 adaptive/learner/learnerND.py            | 86 ++++++++++++++----------
 adaptive/learner/skopt_learner.py        |  6 +-
 adaptive/notebook_integration.py         | 18 +++--
 adaptive/runner.py                       | 31 +++++----
 adaptive/tests/algorithm_4.py            |  4 +-
 adaptive/tests/test_average_learner1d.py |  4 +-
 adaptive/tests/test_balancing_learner.py |  2 +-
 adaptive/tests/test_cquad.py             | 14 ++--
 adaptive/tests/test_learner1d.py         | 10 +--
 adaptive/tests/test_learners.py          | 20 +++---
 adaptive/tests/test_pickling.py          | 16 ++---
 18 files changed, 154 insertions(+), 128 deletions(-)

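Notes: the changes below are the mechanical rewrites produced by
`ruff --fix` plus manual renames for rules without autofixes. The
recurring patterns, condensed into a standalone sketch (illustrative
names only; this is not code from the repository):

    # C408: `dict(...)` calls become dict literals
    im_opts = {"cmap": "viridis"}  # was: dict(cmap="viridis")

    # E741: the ambiguous name `l` becomes `lrn`; unused loop variables
    # gain a leading underscore, e.g. `for _i in range(n): ...`
    learners = [{"npoints": 3}, {"npoints": 5}]
    total = sum(lrn["npoints"] for lrn in learners)

    # B904: a `raise` inside an `except` block chains explicitly, either
    # keeping the cause (`from e`) or suppressing it (`from None`)
    def lookup(mapping, key):
        try:
            return mapping[key]
        except KeyError as e:
            raise AttributeError(f"no attribute {key!r}") from e
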
diff --git a/adaptive/_version.py b/adaptive/_version.py
index 21293fb39..42d0147c4 100644
--- a/adaptive/_version.py
+++ b/adaptive/_version.py
@@ -203,4 +203,4 @@ def make_release_tree(self, base_dir, files):
         _write_version(os.path.join(base_dir, p, STATIC_VERSION_FILE))
 
 
-cmdclass = dict(sdist=_sdist, build_py=_build_py)
+cmdclass = {"sdist": _sdist, "build_py": _build_py}
diff --git a/adaptive/learner/average_learner1D.py b/adaptive/learner/average_learner1D.py
index aa4be9d71..04d2163ed 100644
--- a/adaptive/learner/average_learner1D.py
+++ b/adaptive/learner/average_learner1D.py
@@ -612,7 +612,7 @@ def plot(self):
         margin = 0.05 * (self.bounds[1] - self.bounds[0])
         plot_bounds = (self.bounds[0] - margin, self.bounds[1] + margin)
 
-        return p.redim(x=dict(range=plot_bounds))
+        return p.redim(x={"range": plot_bounds})
 
 
 def decreasing_dict() -> dict:
diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py
index 593331792..da9fd24fc 100644
--- a/adaptive/learner/balancing_learner.py
+++ b/adaptive/learner/balancing_learner.py
@@ -105,7 +105,7 @@ def __init__(
         # Naively we would make 'function' a method, but this causes problems
         # when using executors from 'concurrent.futures' because we have to
         # pickle the whole learner.
-        self.function = partial(dispatch, [l.function for l in self.learners])  # type: ignore
+        self.function = partial(dispatch, [lrn.function for lrn in self.learners])  # type: ignore
 
         self._ask_cache = {}
         self._loss = {}
@@ -130,25 +130,25 @@ def new(self) -> BalancingLearner:
     @property
     def data(self) -> dict[tuple[int, Any], Any]:
         data = {}
-        for i, l in enumerate(self.learners):
-            data.update({(i, p): v for p, v in l.data.items()})
+        for i, lrn in enumerate(self.learners):
+            data.update({(i, p): v for p, v in lrn.data.items()})
         return data
 
     @property
     def pending_points(self) -> set[tuple[int, Any]]:
         pending_points = set()
-        for i, l in enumerate(self.learners):
-            pending_points.update({(i, p) for p in l.pending_points})
+        for i, lrn in enumerate(self.learners):
+            pending_points.update({(i, p) for p in lrn.pending_points})
         return pending_points
 
     @property
     def npoints(self) -> int:
-        return sum(l.npoints for l in self.learners)
+        return sum(lrn.npoints for lrn in self.learners)
 
     @property
     def nsamples(self):
         if hasattr(self.learners[0], "nsamples"):
-            return sum(l.nsamples for l in self.learners)
+            return sum(lrn.nsamples for lrn in self.learners)
         else:
             raise AttributeError(
                 f"{type(self.learners[0])} as no attribute called `nsamples`."
@@ -187,7 +187,7 @@ def _ask_and_tell_based_on_loss_improvements(
         self, n: int
     ) -> tuple[list[tuple[int, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
-        total_points = [l.npoints + len(l.pending_points) for l in self.learners]
+        total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
         for _ in range(n):
             to_select = []
             for index, learner in enumerate(self.learners):
@@ -212,7 +212,7 @@ def _ask_and_tell_based_on_loss(
         self, n: int
     ) -> tuple[list[tuple[int, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
-        total_points = [l.npoints + len(l.pending_points) for l in self.learners]
+        total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
         for _ in range(n):
             losses = self._losses(real=False)
             index, _ = max(
@@ -235,7 +235,7 @@ def _ask_and_tell_based_on_npoints(
         self, n: numbers.Integral
     ) -> tuple[list[tuple[numbers.Integral, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
-        total_points = [l.npoints + len(l.pending_points) for l in self.learners]
+        total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
         for _ in range(n):
             index = np.argmin(total_points)
             # Take the points from the cache
@@ -356,7 +356,9 @@ def plot(
             keys, values_list = cdims
             cdims = [dict(zip(keys, values)) for values in values_list]
 
-        mapping = {tuple(_cdims.values()): l for l, _cdims in zip(self.learners, cdims)}
+        mapping = {
+            tuple(_cdims.values()): lrn for lrn, _cdims in zip(self.learners, cdims)
+        }
 
         d = defaultdict(list)
         for _cdims in cdims:
@@ -526,11 +528,11 @@ def save(
         >>> learner.save(combo_fname)  # use 'load' in the same way
         """
         if isinstance(fname, Iterable):
-            for l, _fname in zip(self.learners, fname):
-                l.save(_fname, compress=compress)
+            for lrn, _fname in zip(self.learners, fname):
+                lrn.save(_fname, compress=compress)
         else:
-            for l in self.learners:
-                l.save(fname(l), compress=compress)
+            for lrn in self.learners:
+                lrn.save(fname(lrn), compress=compress)
 
     def load(
         self,
@@ -554,18 +556,18 @@ def load(
         See the example in the `BalancingLearner.save` doc-string.
         """
         if isinstance(fname, Iterable):
-            for l, _fname in zip(self.learners, fname):
-                l.load(_fname, compress=compress)
+            for lrn, _fname in zip(self.learners, fname):
+                lrn.load(_fname, compress=compress)
         else:
-            for l in self.learners:
-                l.load(fname(l), compress=compress)
+            for lrn in self.learners:
+                lrn.load(fname(lrn), compress=compress)
 
     def _get_data(self) -> list[Any]:
-        return [l._get_data() for l in self.learners]
+        return [lrn._get_data() for lrn in self.learners]
 
     def _set_data(self, data: list[Any]):
-        for l, _data in zip(self.learners, data):
-            l._set_data(_data)
+        for lrn, _data in zip(self.learners, data):
+            lrn._set_data(_data)
 
     def __getstate__(self) -> tuple[list[BaseLearner], CDIMS_TYPE, STRATEGY_TYPE]:
         return (
diff --git a/adaptive/learner/integrator_coeffs.py b/adaptive/learner/integrator_coeffs.py
index 711f30b76..55a57de9b 100644
--- a/adaptive/learner/integrator_coeffs.py
+++ b/adaptive/learner/integrator_coeffs.py
@@ -191,4 +191,4 @@ def __getattr__(name):
     try:
         return _coefficients()[name]
     except KeyError:
-        raise AttributeError(f"module {__name__} has no attribute {name}")
+        raise AttributeError(f"module {__name__} has no attribute {name}") from None
diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py
index f0712778d..ae2a18670 100644
--- a/adaptive/learner/integrator_learner.py
+++ b/adaptive/learner/integrator_learner.py
@@ -471,7 +471,7 @@ def _ask_and_tell_pending(self, n: int) -> tuple[list[float], list[float]]:
             try:
                 self._fill_stack()
             except ValueError:
-                raise RuntimeError("No way to improve the integral estimate.")
+                raise RuntimeError("No way to improve the integral estimate.") from None
             new_points, new_loss_improvements = self.pop_from_stack(n_left)
             points += new_points
             loss_improvements += new_loss_improvements
@@ -513,8 +513,8 @@ def _fill_stack(self) -> list[float]:
         elif ival.depth == 3 or force_split:
             # Always split when depth is maximal or if refining didn't help
             self.ivals.remove(ival)
-            for ival in ival.split():
-                self.add_ival(ival)
+            for iv in ival.split():
+                self.add_ival(iv)
         else:
             self.add_ival(ival.refine())
 
diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py
index d147a5b84..3f2cc70c6 100644
--- a/adaptive/learner/learner1D.py
+++ b/adaptive/learner/learner1D.py
@@ -829,7 +829,7 @@ def plot(self, *, scatter_or_line: str = "scatter"):
         margin = 0.05 * (self.bounds[1] - self.bounds[0])
         plot_bounds = (self.bounds[0] - margin, self.bounds[1] + margin)
 
-        return p.redim(x=dict(range=plot_bounds))
+        return p.redim(x={"range": plot_bounds})
 
     def remove_unfinished(self) -> None:
         self.pending_points = set()
diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py
index 048c1f26f..c565a6caa 100644
--- a/adaptive/learner/learner2D.py
+++ b/adaptive/learner/learner2D.py
@@ -603,6 +603,7 @@ def ip(self) -> LinearNDInterpolator:
             "`learner.ip()` is deprecated, use `learner.interpolator(scaled=True)`."
             " This will be removed in v1.0.",
             DeprecationWarning,
+            stacklevel=2,
         )
         return self.interpolator(scaled=True)
 
@@ -682,7 +683,7 @@ def _fill_stack(
 
         points_new = []
         losses_new = []
-        for j, _ in enumerate(losses):
+        for _j, _ in enumerate(losses):
             jsimplex = np.argmax(losses)
             triangle = ip.tri.points[ip.tri.simplices[jsimplex]]
             point_new = choose_point_in_triangle(triangle, max_badness=5)
@@ -690,7 +691,7 @@ def _fill_stack(
 
             # np.clip results in numerical precision problems
             # https://github.com/python-adaptive/adaptive/issues/7
-            clip = lambda x, l, u: max(l, min(u, x))  # noqa: E731
+            clip = lambda x, lo, up: max(lo, min(up, x))  # noqa: E731
             point_new = (
                 clip(point_new[0], *self.bounds[0]),
                 clip(point_new[1], *self.bounds[1]),
@@ -818,9 +819,9 @@ def plot(self, n=None, tri_alpha=0):
             im = hv.Image([], bounds=lbrt)
             tris = hv.EdgePaths([])
 
-        im_opts = dict(cmap="viridis")
-        tri_opts = dict(line_width=0.5, alpha=tri_alpha)
-        no_hover = dict(plot=dict(inspection_policy=None, tools=[]))
+        im_opts = {"cmap": "viridis"}
+        tri_opts = {"line_width": 0.5, "alpha": tri_alpha}
+        no_hover = {"plot": {"inspection_policy": None, "tools": []}}
 
         return im.opts(style=im_opts) * tris.opts(style=tri_opts, **no_hover)
 
diff --git a/adaptive/learner/learnerND.py b/adaptive/learner/learnerND.py
index 014692f12..e48527efc 100644
--- a/adaptive/learner/learnerND.py
+++ b/adaptive/learner/learnerND.py
@@ -329,11 +329,11 @@ def __init__(self, func, bounds, loss_per_simplex=None):
         self.bounds = bounds
         if isinstance(bounds, scipy.spatial.ConvexHull):
             hull_points = bounds.points[bounds.vertices]
-            self._bounds_points = sorted(list(map(tuple, hull_points)))
+            self._bounds_points = sorted(map(tuple, hull_points))
             self._bbox = tuple(zip(hull_points.min(axis=0), hull_points.max(axis=0)))
             self._interior = scipy.spatial.Delaunay(self._bounds_points)
         else:
-            self._bounds_points = sorted(list(map(tuple, itertools.product(*bounds))))
+            self._bounds_points = sorted(map(tuple, itertools.product(*bounds)))
             self._bbox = tuple(tuple(map(float, b)) for b in bounds)
             self._interior = None
 
@@ -341,12 +341,12 @@ def __init__(self, func, bounds, loss_per_simplex=None):
 
         self.function = func
         self._tri = None
-        self._losses = dict()
+        self._losses = {}
 
-        self._pending_to_simplex = dict()  # vertex → simplex
+        self._pending_to_simplex = {}  # vertex → simplex
 
         # triangulation of the pending points inside a specific simplex
-        self._subtriangulations = dict()  # simplex → triangulation
+        self._subtriangulations = {}  # simplex → triangulation
 
         # scale to unit hypercube
         # for the input
@@ -446,7 +446,7 @@ def to_dataframe(
                 f"point_names ({point_names}) should have the"
                 f" same length as learner.ndims ({self.ndim})"
             )
-        data = list((*x, y) for x, y in self.data.items())
+        data = [(*x, y) for x, y in self.data.items()]
         df = pandas.DataFrame(data, columns=[*point_names, value_name])
         df.attrs["inputs"] = list(point_names)
         df.attrs["output"] = value_name
@@ -865,14 +865,14 @@ def _update_range(self, new_output):
     @cache_latest
     def loss(self, real=True):
         # XXX: compute pending loss if real == False
-        losses = self._losses if self.tri is not None else dict()
+        losses = self._losses if self.tri is not None else {}
         return max(losses.values()) if losses else float("inf")
 
     def remove_unfinished(self):
         # XXX: implement this method
         self.pending_points = set()
-        self._subtriangulations = dict()
-        self._pending_to_simplex = dict()
+        self._subtriangulations = {}
+        self._pending_to_simplex = {}
 
     ##########################
     # Plotting related stuff #
@@ -933,9 +933,9 @@ def plot(self, n=None, tri_alpha=0):
             im = hv.Image([], bounds=lbrt)
             tris = hv.EdgePaths([])
 
-        im_opts = dict(cmap="viridis")
-        tri_opts = dict(line_width=0.5, alpha=tri_alpha)
-        no_hover = dict(plot=dict(inspection_policy=None, tools=[]))
+        im_opts = {"cmap": "viridis"}
+        tri_opts = {"line_width": 0.5, "alpha": tri_alpha}
+        no_hover = {"plot": {"inspection_policy": None, "tools": []}}
 
         return im.opts(style=im_opts) * tris.opts(style=tri_opts, **no_hover)
 
@@ -973,7 +973,7 @@ def plot_slice(self, cut_mapping, n=None):
             # Plot with 5% margins such that the boundary points are visible
             margin = 0.05 / self._transform[ind, ind]
             plot_bounds = (x.min() - margin, x.max() + margin)
-            return p.redim(x=dict(range=plot_bounds))
+            return p.redim(x={"range": plot_bounds})
 
         elif plot_dim == 2:
             if self.vdim > 1:
@@ -1005,7 +1005,7 @@ def plot_slice(self, cut_mapping, n=None):
             else:
                 im = hv.Image([], bounds=lbrt)
 
-            return im.opts(style=dict(cmap="viridis"))
+            return im.opts(style={"cmap": "viridis"})
         else:
             raise ValueError("Only 1 or 2-dimensional plots can be generated.")
 
@@ -1047,20 +1047,20 @@ def plot_3D(self, with_triangulation=False, return_fig=False):
                     y=Ye,
                     z=Ze,
                     mode="lines",
-                    line=dict(color="rgb(125,125,125)", width=1),
+                    line={"color": "rgb(125,125,125)", "width": 1},
                     hoverinfo="none",
                 )
             )
 
         Xn, Yn, Zn = zip(*vertices)
         colors = [self.data[p] for p in self.tri.vertices]
-        marker = dict(
-            symbol="circle",
-            size=3,
-            color=colors,
-            colorscale="Viridis",
-            line=dict(color="rgb(50,50,50)", width=0.5),
-        )
+        marker = {
+            "symbol": "circle",
+            "size": 3,
+            "color": colors,
+            "colorscale": "Viridis",
+            "line": {"color": "rgb(50,50,50)", "width": 0.5},
+        }
 
         plots.append(
             plotly.graph_objs.Scatter3d(
@@ -1074,19 +1074,19 @@ def plot_3D(self, with_triangulation=False, return_fig=False):
             )
         )
 
-        axis = dict(
-            showbackground=False,
-            showline=False,
-            zeroline=False,
-            showgrid=False,
-            showticklabels=False,
-            title="",
-        )
+        axis = {
+            "showbackground": False,
+            "showline": False,
+            "zeroline": False,
+            "showgrid": False,
+            "showticklabels": False,
+            "title": "",
+        }
 
         layout = plotly.graph_objs.Layout(
             showlegend=False,
-            scene=dict(xaxis=axis, yaxis=axis, zaxis=axis),
-            margin=dict(t=100),
+            scene={"xaxis": axis, "yaxis": axis, "zaxis": axis},
+            margin={"t": 100},
             hovermode="closest",
         )
 
@@ -1193,15 +1193,15 @@ def plot_isoline(self, level=0.0, n=None, tri_alpha=0):
             plot = self.plot(n=n, tri_alpha=tri_alpha)
 
         if isinstance(level, Iterable):
-            for l in level:
-                plot = plot * self.plot_isoline(level=l, n=-1)
+            for lvl in level:
+                plot = plot * self.plot_isoline(level=lvl, n=-1)
             return plot
 
         vertices, lines = self._get_iso(level, which="line")
         paths = [[vertices[i], vertices[j]] for i, j in lines]
         contour = hv.Path(paths)
 
-        contour_opts = dict(color="black")
+        contour_opts = {"color": "black"}
         contour = contour.opts(style=contour_opts)
         return plot * contour
 
@@ -1233,7 +1233,13 @@ def plot_isosurface(self, level=0.0, hull_opacity=0.2):
         )
         isosurface = fig.data[0]
         isosurface.update(
-            lighting=dict(ambient=1, diffuse=1, roughness=1, specular=0, fresnel=0)
+            lighting={
+                "ambient": 1,
+                "diffuse": 1,
+                "roughness": 1,
+                "specular": 0,
+                "fresnel": 0,
+            }
         )
 
         if hull_opacity < 1e-3:
@@ -1269,7 +1275,13 @@ def _get_plane_color(simplex):
 
         x, y, z = zip(*self._bounds_points)
         i, j, k = hull.simplices.T
-        lighting = dict(ambient=1, diffuse=1, roughness=1, specular=0, fresnel=0)
+        lighting = {
+            "ambient": 1,
+            "diffuse": 1,
+            "roughness": 1,
+            "specular": 0,
+            "fresnel": 0,
+        }
         return plotly.graph_objs.Mesh3d(
             x=x,
             y=y,
diff --git a/adaptive/learner/skopt_learner.py b/adaptive/learner/skopt_learner.py
index e12f49daa..dd39f83cb 100644
--- a/adaptive/learner/skopt_learner.py
+++ b/adaptive/learner/skopt_learner.py
@@ -98,12 +98,12 @@ def plot(self, nsamples=200):
                 xsp = self.space.transform(xs.reshape(-1, 1).tolist())
                 y_pred, sigma = model.predict(xsp, return_std=True)
                 # Plot model prediction for function
-                curve = hv.Curve((xs, y_pred)).opts(style=dict(line_dash="dashed"))
+                curve = hv.Curve((xs, y_pred)).opts(style={"line_dash": "dashed"})
                 # Plot 95% confidence interval as colored area around points
                 area = hv.Area(
                     (xs, y_pred - 1.96 * sigma, y_pred + 1.96 * sigma),
                     vdims=["y", "y2"],
-                ).opts(style=dict(alpha=0.5, line_alpha=0))
+                ).opts(style={"alpha": 0.5, "line_alpha": 0})
 
             else:
                 area = hv.Area([])
@@ -114,7 +114,7 @@ def plot(self, nsamples=200):
         margin = 0.05 * (bounds[1] - bounds[0])
         plot_bounds = (bounds[0] - margin, bounds[1] + margin)
 
-        return p.redim(x=dict(range=plot_bounds))
+        return p.redim(x={"range": plot_bounds})
 
     def _get_data(self):
         return [x[0] for x in self.Xi], self.yi
diff --git a/adaptive/notebook_integration.py b/adaptive/notebook_integration.py
index 60329110e..33ef01bec 100644
--- a/adaptive/notebook_integration.py
+++ b/adaptive/notebook_integration.py
@@ -33,7 +33,9 @@ def notebook_extension(*, _inline_js=True):
             _holoviews_enabled = True
     except ModuleNotFoundError:
         warnings.warn(
-            "holoviews is not installed; plotting is disabled.", RuntimeWarning
+            "holoviews is not installed; plotting is disabled.",
+            RuntimeWarning,
+            stacklevel=2,
         )
 
     # Load ipywidgets
@@ -44,7 +46,9 @@ def notebook_extension(*, _inline_js=True):
             _ipywidgets_enabled = True
     except ModuleNotFoundError:
         warnings.warn(
-            "ipywidgets is not installed; live_info is disabled.", RuntimeWarning
+            "ipywidgets is not installed; live_info is disabled.",
+            RuntimeWarning,
+            stacklevel=2,
         )
 
     # Enable asyncio integration
@@ -57,7 +61,9 @@ def ensure_holoviews():
     try:
         return importlib.import_module("holoviews")
     except ModuleNotFoundError:
-        raise RuntimeError("holoviews is not installed; plotting is disabled.")
+        raise RuntimeError(
+            "holoviews is not installed; plotting is disabled."
+        ) from None
 
 
 def ensure_plotly():
@@ -74,8 +80,8 @@ def ensure_plotly():
             plotly.offline.init_notebook_mode()
             _plotly_enabled = True
         return plotly
-    except ModuleNotFoundError:
-        raise RuntimeError("plotly is not installed; plotting is disabled.")
+    except ModuleNotFoundError as e:
+        raise RuntimeError("plotly is not installed; plotting is disabled.") from e
 
 
 def in_ipynb() -> bool:
@@ -88,7 +94,7 @@ def in_ipynb() -> bool:
 
 # Fancy displays in the Jupyter notebook
 
-active_plotting_tasks = dict()
+active_plotting_tasks = {}
 
 
 def live_plot(runner, *, plotter=None, update_interval=2, name=None, normalize=True):
diff --git a/adaptive/runner.py b/adaptive/runner.py
index 9d1a24206..a5fbb5070 100644
--- a/adaptive/runner.py
+++ b/adaptive/runner.py
@@ -644,14 +644,14 @@ def __init__(
         ):
             try:
                 pickle.dumps(learner.function)
-            except pickle.PicklingError:
+            except pickle.PicklingError as e:
                 raise ValueError(
                     "`learner.function` cannot be pickled (is it a lamdba function?)"
                     " and therefore does not work with the default executor."
                     " Either make sure the function is pickleble or use an executor"
                     " that might work with 'hard to pickle'-functions"
                     " , e.g. `ipyparallel` with `dill`."
-                )
+                ) from e
 
         super().__init__(
             learner,
@@ -689,7 +689,8 @@ def __init__(
                 "The runner has been scheduled, but the asyncio "
                 "event loop is not running! If you are "
                 "in a Jupyter notebook, remember to run "
-                "'adaptive.notebook_extension()'"
+                "'adaptive.notebook_extension()'",
+                stacklevel=2,
             )
 
     def _submit(self, x: Any) -> asyncio.Task | asyncio.Future:
@@ -1060,13 +1061,13 @@ def auto_goal(
     -------
     Callable[[adaptive.BaseLearner], bool]
     """
-    kw = dict(
-        loss=loss,
-        npoints=npoints,
-        end_time=end_time,
-        duration=duration,
-        allow_running_forever=allow_running_forever,
-    )
+    kw = {
+        "loss": loss,
+        "npoints": npoints,
+        "end_time": end_time,
+        "duration": duration,
+        "allow_running_forever": allow_running_forever,
+    }
     opts = (loss, npoints, end_time, duration)  # all are mutually exclusive
     if sum(v is not None for v in opts) > 1:
         raise ValueError(
@@ -1079,8 +1080,10 @@ def auto_goal(
         # Note that the float loss goal is more efficiently implemented in the
         # BalancingLearner itself. That is why the previous if statement is
         # above this one.
-        goals = [auto_goal(learner=l, **kw) for l in learner.learners]
-        return lambda learner: all(goal(l) for l, goal in zip(learner.learners, goals))
+        goals = [auto_goal(learner=lrn, **kw) for lrn in learner.learners]
+        return lambda learner: all(
+            goal(lrn) for lrn, goal in zip(learner.learners, goals)
+        )
     if npoints is not None:
         return lambda learner: learner.npoints >= npoints
     if end_time is not None:
@@ -1099,7 +1102,9 @@ def auto_goal(
                 "Goal is None which means the learners"
                 " continue forever and this is not allowed."
             )
-        warnings.warn("Goal is None which means the learners continue forever!")
+        warnings.warn(
+            "Goal is None which means the learners continue forever!", stacklevel=2
+        )
         return lambda _: False
     raise ValueError("Cannot determine goal from {goal}.")
 
diff --git a/adaptive/tests/algorithm_4.py b/adaptive/tests/algorithm_4.py
index 4566c0fa1..180149ec2 100644
--- a/adaptive/tests/algorithm_4.py
+++ b/adaptive/tests/algorithm_4.py
@@ -307,7 +307,7 @@ def refine(self, f: Callable) -> Tuple[np.ndarray, bool, int]:
 
 
 def algorithm_4(
-    f: Callable, a: int, b: int, tol: float, N_loops: int = int(1e9)
+    f: Callable, a: int, b: int, tol: float, N_loops: int = int(1e9)  # noqa: B008
 ) -> Tuple[float, float, int, List["_Interval"]]:
     """ALGORITHM_4 evaluates an integral using adaptive quadrature. The
     algorithm uses Clenshaw-Curtis quadrature rules of increasing
@@ -578,7 +578,7 @@ def F(x):
             if alpha <= -1:
                 false_positives += 1
             else:
-                igral_exact = F(1) - F(0)
+                igral_exact = F(1, alpha, beta) - F(0, alpha, beta)
                 assert alpha < -0.7 or abs(igral - igral_exact) < err
 
     assert false_negatives < 0.05 * n
diff --git a/adaptive/tests/test_average_learner1d.py b/adaptive/tests/test_average_learner1d.py
index 8b2670d77..d76a034c7 100644
--- a/adaptive/tests/test_average_learner1d.py
+++ b/adaptive/tests/test_average_learner1d.py
@@ -24,8 +24,8 @@ def almost_equal_dicts(a, b):
         else:
             try:
                 np.testing.assert_almost_equal(v1, v2)
-            except TypeError:
-                raise AssertionError(f"{v1} != {v2}")
+            except TypeError as e:
+                raise AssertionError(f"{v1} != {v2}") from e
 
 
 def test_tell_many_at_point():
diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py
index b2e513698..72b1bc8f3 100644
--- a/adaptive/tests/test_balancing_learner.py
+++ b/adaptive/tests/test_balancing_learner.py
@@ -54,7 +54,7 @@ def test_ask_0(strategy):
     [
         ("loss", "loss_goal", 0.1),
         ("loss_improvements", "loss_goal", 0.1),
-        ("npoints", "goal", lambda bl: all(l.npoints > 10 for l in bl.learners)),
+        ("npoints", "goal", lambda bl: all(lrn.npoints > 10 for lrn in bl.learners)),
         ("cycle", "loss_goal", 0.1),
     ],
 )
diff --git a/adaptive/tests/test_cquad.py b/adaptive/tests/test_cquad.py
index fa6c1fcf9..15e5f1e27 100644
--- a/adaptive/tests/test_cquad.py
+++ b/adaptive/tests/test_cquad.py
@@ -138,7 +138,7 @@ def test_adding_points_and_skip_one_point():
         if x != skip_x:
             learner.tell(x, learner.function(x))
 
-    for i in range(1000):
+    for _i in range(1000):
         xs, _ = learner.ask(1)
         for x in xs:
             if x != skip_x:
@@ -150,7 +150,7 @@ def test_adding_points_and_skip_one_point():
     # Create a learner with the same number of points, which should
     # give an identical igral value.
     learner2 = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
-    for i in range(1017):
+    for _i in range(1017):
         xs, _ = learner2.ask(1)
         for x in xs:
             learner2.tell(x, learner2.function(x))
@@ -190,19 +190,19 @@ def test_tell_in_random_order(first_add_33=False):
 
         # Test whether approximating_intervals gives a complete set of intervals
         for learner in learners:
-            ivals = sorted(learner.approximating_intervals, key=lambda l: l.a)
+            ivals = sorted(learner.approximating_intervals, key=lambda lrn: lrn.a)
             for i in range(len(ivals) - 1):
                 assert ivals[i].b == ivals[i + 1].a, (ivals[i], ivals[i + 1])
 
         # Test if approximating_intervals is the same for random order of adding the point
         ivals = [
             sorted(ival, key=attrgetter("a"))
-            for ival in [l.approximating_intervals for l in learners]
+            for ival in [lrn.approximating_intervals for lrn in learners]
         ]
         assert all(ival.a == other_ival.a for ival, other_ival in zip(*ivals))
 
         # Test if the approximating_intervals are the same
-        ivals = [{(i.a, i.b) for i in l.approximating_intervals} for l in learners]
+        ivals = [{(i.a, i.b) for i in lrn.approximating_intervals} for lrn in learners]
         assert ivals[0] == ivals[1]
 
         # Test whether the igral is identical
@@ -210,7 +210,7 @@ def test_tell_in_random_order(first_add_33=False):
 
         # Compare if the errors are in line with the sequential case
         igral, err, *_ = algorithm_4(f, a, b, tol=tol)
-        assert all((l.err + err >= abs(l.igral - igral)) for l in learners)
+        assert all((lrn.err + err >= abs(lrn.igral - igral)) for lrn in learners)
 
         # Check that the errors are finite
         for learner in learners:
@@ -233,7 +233,7 @@ def test_approximating_intervals():
     for x in xs:
         learner.tell(x, f24(x))
 
-    ivals = sorted(learner.approximating_intervals, key=lambda l: l.a)
+    ivals = sorted(learner.approximating_intervals, key=lambda lrn: lrn.a)
     for i in range(len(ivals) - 1):
         assert ivals[i].b == ivals[i + 1].a, (ivals[i], ivals[i + 1])
 
diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py
index d0d595594..dceb28797 100644
--- a/adaptive/tests/test_learner1d.py
+++ b/adaptive/tests/test_learner1d.py
@@ -103,7 +103,7 @@ def test_loss_interpolation():
 
     learner.tell(-1, 0)
     learner.tell(1, 0)
-    for i in range(100):
+    for _i in range(100):
         # Add 100 points with either None or 0
         if random.random() < 0.9:
             learner.tell_pending(random.uniform(-1, 1))
@@ -166,7 +166,7 @@ def test_loss_at_machine_precision_interval_is_zero():
     def f(x):
         return 1 if x == 0 else 0
 
-    def goal(l):
+    def goal(learner):
         return learner.loss() < 0.01 or learner.npoints >= 1000
 
     learner = Learner1D(f, bounds=(-1, 1))
@@ -193,7 +193,7 @@ def test_small_deviations():
     # parallel execution
     stash = []
 
-    for i in range(100):
+    for _i in range(100):
         xs, _ = learner.ask(10)
 
         # Save 5 random points out of `xs` for later
@@ -323,7 +323,7 @@ def _random_run(learner, learner2, scale_doubling=True):
             learner2.tell(x, max_value)
 
         stash = []
-        for i in range(10):
+        for _i in range(10):
             xs, _ = learner.ask(10)
             for x in xs:
                 learner2.tell_pending(x)
@@ -409,4 +409,4 @@ def test_inf_loss_with_missing_bounds():
     # must be done in parallel because otherwise the bounds will be evaluated first
     BlockingRunner(learner, loss_goal=0.01)
 
-    learner.npoints > 20
+    assert learner.npoints > 20
diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py
index e800b6d39..6f3ee916e 100644
--- a/adaptive/tests/test_learners.py
+++ b/adaptive/tests/test_learners.py
@@ -92,9 +92,9 @@ def uniform(a, b):
 def simple_run(learner, n):
     def get_goal(learner):
         if hasattr(learner, "nsamples"):
-            return lambda l: l.nsamples > n
+            return lambda lrn: lrn.nsamples > n
         else:
-            return lambda l: l.npoints > n
+            return lambda lrn: lrn.npoints > n
 
     def goal():
         if isinstance(learner, BalancingLearner):
@@ -504,7 +504,7 @@ def scale_x(x):
         # Because the LearnerND is slow
         npoints //= 10
 
-    for n in range(npoints):
+    for _n in range(npoints):
         cxs, _ = control.ask(1)
         xs, _ = learner.ask(1)
         control.tell_many(cxs, [control.function(x) for x in cxs])
@@ -540,7 +540,7 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
     # Emulate parallel execution
     stash = []
 
-    for i in range(100):
+    for _i in range(100):
         n = random.randint(1, 10)
         m = random.randint(0, n)
         xs, _ = learner.ask(n, tell_pending=False)
@@ -560,11 +560,11 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
             learner.tell(x, learner.function(x))
 
     if learner_type is AverageLearner1D:
-        nsamples = [l.nsamples for l in learner.learners]
-        assert all(l.nsamples > 5 for l in learner.learners), nsamples
+        nsamples = [lrn.nsamples for lrn in learner.learners]
+        assert all(lrn.nsamples > 5 for lrn in learner.learners), nsamples
     else:
-        npoints = [l.npoints for l in learner.learners]
-        assert all(l.npoints > 5 for l in learner.learners), npoints
+        npoints = [lrn.npoints for lrn in learner.learners]
+        assert all(lrn.npoints > 5 for lrn in learner.learners), npoints
 
 
 @run_with(
@@ -617,8 +617,8 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
     control = learner.new()
 
     if learner_type in (Learner1D, AverageLearner1D):
-        for l, c in zip(learner.learners, control.learners):
-            l._recompute_losses_factor = 1
+        for lrn, c in zip(learner.learners, control.learners):
+            lrn._recompute_losses_factor = 1
             c._recompute_losses_factor = 1
 
     simple_run(learner, 100)
diff --git a/adaptive/tests/test_pickling.py b/adaptive/tests/test_pickling.py
index baf5b1146..9721fd273 100644
--- a/adaptive/tests/test_pickling.py
+++ b/adaptive/tests/test_pickling.py
@@ -61,17 +61,17 @@ def balancing_learner(f, learner_type, learner_kwargs):
 
 
 learners_pairs = [
-    (Learner1D, dict(bounds=(-1, 1))),
-    (Learner2D, dict(bounds=((-1, 1), (-1, 1)))),
-    (SequenceLearner, dict(sequence=list(range(100)))),
-    (IntegratorLearner, dict(bounds=(0, 1), tol=1e-3)),
-    (AverageLearner, dict(atol=0.1)),
-    (datasaver, dict(learner_type=Learner1D, learner_kwargs=dict(bounds=(-1, 1)))),
+    (Learner1D, {"bounds": (-1, 1)}),
+    (Learner2D, {"bounds": ((-1, 1), (-1, 1))}),
+    (SequenceLearner, {"sequence": list(range(100))}),
+    (IntegratorLearner, {"bounds": (0, 1), "tol": 1e-3}),
+    (AverageLearner, {"atol": 0.1}),
+    (datasaver, {"learner_type": Learner1D, "learner_kwargs": {"bounds": (-1, 1)}}),
     (
         balancing_learner,
-        dict(learner_type=Learner1D, learner_kwargs=dict(bounds=(-1, 1))),
+        {"learner_type": Learner1D, "learner_kwargs": {"bounds": (-1, 1)}},
     ),
-    (LearnerND, dict(bounds=((-1, 1), (-1, 1), (-1, 1)))),
+    (LearnerND, {"bounds": ((-1, 1), (-1, 1), (-1, 1))}),
 ]
 
 serializers = [(pickle, pickleable_f)]

From 05fc5459e0afdf3d3b80d2c462230c2768a48620 Mon Sep 17 00:00:00 2001
From: Bas Nijholt <bas@nijho.lt>
Date: Fri, 7 Apr 2023 00:12:22 -0700
Subject: [PATCH 3/3] Bump pre-commit hook versions and add a missing stacklevel

---
 .pre-commit-config.yaml | 4 ++--
 adaptive/utils.py       | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

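Notes: besides bumping the black and ruff hook versions, this commit adds
`stacklevel=2` to the one remaining `warnings.warn` call, matching the
pattern from the previous commit. With the default `stacklevel=1` a
warning is attributed to the library line that calls `warn`; with
`stacklevel=2` it is attributed to the caller, which is where the problem
actually originates. A minimal standalone demonstration (hypothetical
function name):

    import warnings

    def deprecated_helper():
        # reported at the caller's line, not at this `warn` call
        warnings.warn("deprecated_helper is deprecated", stacklevel=2)

    deprecated_helper()  # the emitted warning points to this line
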
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7698bb4bd..edaef1c72 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,11 +9,11 @@ repos:
       - id: debug-statements
       - id: check-ast
   - repo: https://github.com/psf/black
-    rev: 23.1.0
+    rev: 23.3.0
     hooks:
       - id: black
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: "v0.0.259"
+    rev: "v0.0.261"
     hooks:
       - id: ruff
         args: ["--fix"]
diff --git a/adaptive/utils.py b/adaptive/utils.py
index c3905df5c..f2c23aa71 100644
--- a/adaptive/utils.py
+++ b/adaptive/utils.py
@@ -153,7 +153,8 @@ def partial_function_from_dataframe(function, df, function_prefix: str = "functi
             warnings.warn(
                 f"The DataFrame contains a default parameter"
                 f" ({k}={v}) but the function already has a default ({k}={default})."
-                " The DataFrame's value will be used."
+                " The DataFrame's value will be used.",
+                stacklevel=2,
             )
     return functools.partial(function, **kwargs)