From 6346d39babd30aa75277f567d5dd7e20ad6d23fc Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Thu, 20 Aug 2020 03:45:01 +0100 Subject: [PATCH 1/7] =?UTF-8?q?=F0=9F=91=8C=20IMPROVE:=20Standardise=20aut?= =?UTF-8?q?o/cache=20execution?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both now call the same underlying function (from jupyter-cache) and act the same. This improves `auto` by making it output error reports rather than raising an exception on an error. Additional config has also been added: `execution_allow_errors` and `execution_in_temp`. As with the timeout, `allow_errors` can also be set in the notebook metadata, under `metadata.execution.allow_errors`. This introduces one breaking change: `auto` will now by default execute in a temporary folder as the cwd. --- myst_nb/__init__.py | 2 + myst_nb/cache.py | 105 +++++++++++++----- setup.py | 3 +- tests/test_execute.py | 53 +++++++-- .../test_execute/test_allow_errors_auto.ipynb | 56 ++++++++++ tests/test_execute/test_allow_errors_auto.xml | 12 ++ .../test_allow_errors_cache.ipynb | 56 ++++++++++ .../test_execute/test_allow_errors_cache.xml | 12 ++ .../test_basic_failing_auto.ipynb | 56 ++++++++++ .../test_execute/test_basic_failing_auto.xml | 12 ++ ...g.ipynb => test_basic_failing_cache.ipynb} | 0 ...iling.xml => test_basic_failing_cache.xml} | 0 ...ient.ipynb => test_basic_unrun_auto.ipynb} | 0 ...ic_unrun.xml => test_basic_unrun_auto.xml} | 0 ...run.ipynb => test_basic_unrun_cache.ipynb} | 0 ...bclient.xml => test_basic_unrun_cache.xml} | 0 ... => test_complex_outputs_unrun_auto.ipynb} | 0 ...ml => test_complex_outputs_unrun_auto.xml} | 0 ...=> test_complex_outputs_unrun_cache.ipynb} | 0 ...l => test_complex_outputs_unrun_cache.xml} | 0 20 files changed, 331 insertions(+), 36 deletions(-) create mode 100644 tests/test_execute/test_allow_errors_auto.ipynb create mode 100644 tests/test_execute/test_allow_errors_auto.xml create mode 100644 tests/test_execute/test_allow_errors_cache.ipynb create mode 100644 tests/test_execute/test_allow_errors_cache.xml create mode 100644 tests/test_execute/test_basic_failing_auto.ipynb create mode 100644 tests/test_execute/test_basic_failing_auto.xml rename tests/test_execute/{test_basic_failing.ipynb => test_basic_failing_cache.ipynb} (100%) rename tests/test_execute/{test_basic_failing.xml => test_basic_failing_cache.xml} (100%) rename tests/test_execute/{test_basic_unrun_nbclient.ipynb => test_basic_unrun_auto.ipynb} (100%) rename tests/test_execute/{test_basic_unrun.xml => test_basic_unrun_auto.xml} (100%) rename tests/test_execute/{test_basic_unrun.ipynb => test_basic_unrun_cache.ipynb} (100%) rename tests/test_execute/{test_basic_unrun_nbclient.xml => test_basic_unrun_cache.xml} (100%) rename tests/test_execute/{test_complex_outputs_unrun_nbclient.ipynb => test_complex_outputs_unrun_auto.ipynb} (100%) rename tests/test_execute/{test_complex_outputs_unrun_nbclient.xml => test_complex_outputs_unrun_auto.xml} (100%) rename tests/test_execute/{test_complex_outputs_unrun.ipynb => test_complex_outputs_unrun_cache.ipynb} (100%) rename tests/test_execute/{test_complex_outputs_unrun.xml => test_complex_outputs_unrun_cache.xml} (100%) diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index f6954149..e13ab6e8 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -104,6 +104,8 @@ def visit_element_html(self, node): app.add_config_value("execution_excludepatterns", [], "env") app.add_config_value("jupyter_execute_notebooks", "auto", "env")
app.add_config_value("execution_timeout", 30, "env") + app.add_config_value("execution_allow_errors", False, "env") + app.add_config_value("execution_in_temp", True, "env") # show traceback in stdout (in addition to writing to file) # this is useful in e.g. RTD where one cannot inspect a file app.add_config_value("execution_show_tb", False, "") diff --git a/myst_nb/cache.py b/myst_nb/cache.py index ce4e125f..db91d188 100644 --- a/myst_nb/cache.py +++ b/myst_nb/cache.py @@ -10,20 +10,20 @@ """ import os +import tempfile from typing import List, Optional, Set import nbformat as nbf -from nbclient import execute from pathlib import Path from sphinx.application import Sphinx from sphinx.builders import Builder from sphinx.environment import BuildEnvironment from sphinx.util import logging -from sphinx.util.osutil import ensuredir from jupyter_cache import get_cache from jupyter_cache.executors import load_executor +from jupyter_cache.executors.utils import single_nb_execution from myst_parser.main import MdParserConfig @@ -66,10 +66,12 @@ def update_execution_cache( cache_base.discard_staged_notebook(docpath) _stage_and_execute( - app.env, - exec_docnames, - app.env.nb_path_to_cache, - app.config["execution_timeout"], + env=app.env, + exec_docnames=exec_docnames, + path_to_cache=app.env.nb_path_to_cache, + timeout=app.config["execution_timeout"], + allow_errors=app.config["execution_allow_errors"], + exec_in_temp=app.config["execution_in_temp"], ) return altered_docnames @@ -95,8 +97,6 @@ def generate_notebook_outputs( # If we have a jupyter_cache, see if there's a cache for this notebook file_path = file_path or env.doc2path(env.docname) - dest_path = Path(env.app.outdir) - reports_dir = str(dest_path) + "/reports" execution_method = env.config["jupyter_execute_notebooks"] # type: str @@ -109,14 +109,41 @@ def generate_notebook_outputs( if execution_method == "auto" and is_nb_with_outputs(file_path): LOGGER.info( - "Did not execute {}. " - "Set jupyter_execute_notebooks to `force` to execute".format( - env.docname - ) + "Did not execute %s. 
" + "Set jupyter_execute_notebooks to `force` to execute", + env.docname, ) else: - LOGGER.info("Executing: {}".format(env.docname)) - ntbk = execute(ntbk, cwd=Path(file_path).parent) + if env.config["execution_in_temp"]: + with tempfile.TemporaryDirectory() as tmpdirname: + LOGGER.info("Executing: %s in temporary directory", env.docname) + result = single_nb_execution( + ntbk, + cwd=tmpdirname, + timeout=env.config["execution_timeout"], + allow_errors=env.config["execution_allow_errors"], + ) + else: + cwd = Path(file_path).parent + LOGGER.info("Executing: %s in: %s", env.docname, cwd) + result = single_nb_execution( + ntbk, + cwd=cwd, + timeout=env.config["execution_timeout"], + allow_errors=env.config["execution_allow_errors"], + ) + + if result.err: + message = _report_exec_fail( + env, + Path(file_path).name, + result.exc_string, + show_traceback, + "Execution Failed with traceback saved in {}", + ) + LOGGER.error(message) + + ntbk = result.nb return ntbk @@ -136,17 +163,13 @@ def generate_notebook_outputs( except KeyError: stage_record = None if stage_record and stage_record.traceback: - # save the traceback to a log file - ensuredir(reports_dir) - file_name = os.path.splitext(r_file_path.name)[0] - full_path = reports_dir + "/{}.log".format(file_name) - with open(full_path, "w", encoding="utf8") as log_file: - log_file.write(stage_record.traceback) - message += "\n Last execution failed with traceback saved in {}".format( - full_path + message += _report_exec_fail( + env, + r_file_path.name, + stage_record.traceback, + show_traceback, + "\n Last execution failed with traceback saved in {}", ) - if show_traceback: - message += "\n" + stage_record.traceback LOGGER.error(message) @@ -174,11 +197,27 @@ def is_valid_exec_file(env: BuildEnvironment, docname: str) -> bool: return True +def _report_exec_fail( + env, file_name: str, traceback: str, show_traceback: bool, template: str, +): + """Save the traceback to a log file, and create log message.""" + reports_dir = Path(env.app.outdir).joinpath("reports") + reports_dir.mkdir(exist_ok=True) + full_path = reports_dir.joinpath(os.path.splitext(file_name)[0] + ".log") + full_path.write_text(traceback, encoding="utf8") + message = template.format(full_path) + if show_traceback: + message += "\n" + traceback + return message + + def _stage_and_execute( env: BuildEnvironment, exec_docnames: List[str], path_to_cache: str, timeout: Optional[int], + allow_errors: bool, + exec_in_temp: bool, ): pk_list = [] cache_base = get_cache(path_to_cache) @@ -191,7 +230,14 @@ def _stage_and_execute( # can leverage parallel execution implemented in jupyter-cache here try: - execute_staged_nb(cache_base, pk_list or None, timeout, env.myst_config) + execute_staged_nb( + cache_base, + pk_list or None, + timeout=timeout, + exec_in_temp=exec_in_temp, + allow_errors=allow_errors, + config=env.myst_config, + ) except OSError as err: # This is a 'fix' for obscure cases, such as if you # remove name.ipynb and add name.md (i.e. 
same name, different extension) @@ -207,7 +253,12 @@ def _stage_and_execute( def execute_staged_nb( - cache_base, pk_list, timeout: Optional[int], config: MdParserConfig + cache_base, + pk_list, + timeout: Optional[int], + exec_in_temp: bool, + allow_errors: bool, + config: MdParserConfig, ): """Executing the staged notebook.""" try: @@ -219,6 +270,8 @@ def execute_staged_nb( filter_pks=pk_list or None, converter=lambda p: path_to_notebook(p, config), timeout=timeout, + allow_errors=allow_errors, + run_in_temp=exec_in_temp, ) return result diff --git a/setup.py b/setup.py index a28d4141..5831af5b 100644 --- a/setup.py +++ b/setup.py @@ -45,11 +45,10 @@ "docutils>=0.15", "sphinx>=2,<4", "jupyter_sphinx~=0.2.4", - "jupyter-cache~=0.3.0", + "jupyter-cache~=0.4.0", "ipython", "nbformat~=5.0", "nbconvert~=5.6", - "nbclient", "pyyaml", "sphinx-togglebutton~=0.2.2", ], diff --git a/tests/test_execute.py b/tests/test_execute.py index 402ab315..bb8ff0af 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -1,10 +1,22 @@ import pytest +@pytest.mark.sphinx_params( + "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"} +) +def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"] + file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") + file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + + @pytest.mark.sphinx_params( "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} ) -def test_basic_unrun(sphinx_run, file_regression, check_nbs): +def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): """The outputs should be populated.""" sphinx_run.build() assert sphinx_run.warnings() == "" @@ -55,7 +67,7 @@ def test_exclude_path(sphinx_run, file_regression): @pytest.mark.sphinx_params( "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "cache"} ) -def test_basic_failing(sphinx_run, file_regression, check_nbs): +def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) assert "Execution Failed" in sphinx_run.warnings() @@ -69,13 +81,38 @@ def test_basic_failing(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"} + "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "auto"} ) -def test_basic_unrun_nbclient(sphinx_run, file_regression, check_nbs): +def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) - assert sphinx_run.warnings() == "" - assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"] + assert "Execution Failed" in sphinx_run.warnings() + assert "Execution Failed with traceback saved in" in sphinx_run.warnings() + file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") + file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + sphinx_run.get_report_file() + + +@pytest.mark.sphinx_params( + "basic_failing.ipynb", + conf={"jupyter_execute_notebooks": "cache", "execution_allow_errors": True}, +) +def test_allow_errors_cache(sphinx_run, file_regression, check_nbs): + sphinx_run.build() + # print(sphinx_run.status()) + assert not sphinx_run.warnings() + file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") + 
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + + +@pytest.mark.sphinx_params( + "basic_failing.ipynb", + conf={"jupyter_execute_notebooks": "auto", "execution_allow_errors": True}, +) +def test_allow_errors_auto(sphinx_run, file_regression, check_nbs): + sphinx_run.build() + # print(sphinx_run.status()) + assert not sphinx_run.warnings() file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") @@ -95,7 +132,7 @@ def test_outputs_present(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} ) -def test_complex_outputs_unrun(sphinx_run, file_regression, check_nbs): +def test_complex_outputs_unrun_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" @@ -111,7 +148,7 @@ def test_complex_outputs_unrun(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"} ) -def test_complex_outputs_unrun_nbclient(sphinx_run, file_regression, check_nbs): +def test_complex_outputs_unrun_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" diff --git a/tests/test_execute/test_allow_errors_auto.ipynb b/tests/test_execute/test_allow_errors_auto.ipynb new file mode 100644 index 00000000..bacf1769 --- /dev/null +++ b/tests/test_execute/test_allow_errors_auto.ipynb @@ -0,0 +1,56 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# a title\n", + "\n", + "some text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "Exception", + "evalue": "oopsie!", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mException\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'oopsie!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mException\u001b[0m: oopsie!" + ] + } + ], + "source": [ + "raise Exception('oopsie!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.8" + }, + "test_name": "notebook1" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tests/test_execute/test_allow_errors_auto.xml b/tests/test_execute/test_allow_errors_auto.xml new file mode 100644 index 00000000..9dd3813e --- /dev/null +++ b/tests/test_execute/test_allow_errors_auto.xml @@ -0,0 +1,12 @@ + +
+ + a title + <paragraph> + some text + <CellNode cell_type="code" classes="cell"> + <CellInputNode classes="cell_input"> + <literal_block language="ipython3" xml:space="preserve"> + raise Exception('oopsie!') + <CellOutputNode classes="cell_output"> + <CellOutputBundleNode output_count="1"> diff --git a/tests/test_execute/test_allow_errors_cache.ipynb b/tests/test_execute/test_allow_errors_cache.ipynb new file mode 100644 index 00000000..bacf1769 --- /dev/null +++ b/tests/test_execute/test_allow_errors_cache.ipynb @@ -0,0 +1,56 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# a title\n", + "\n", + "some text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "Exception", + "evalue": "oopsie!", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mException\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-1-714b2b556897>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'oopsie!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mException\u001b[0m: oopsie!" + ] + } + ], + "source": [ + "raise Exception('oopsie!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.8" + }, + "test_name": "notebook1" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tests/test_execute/test_allow_errors_cache.xml b/tests/test_execute/test_allow_errors_cache.xml new file mode 100644 index 00000000..9dd3813e --- /dev/null +++ b/tests/test_execute/test_allow_errors_cache.xml @@ -0,0 +1,12 @@ +<document source="basic_failing"> + <section ids="a-title" names="a\ title"> + <title> + a title + <paragraph> + some text + <CellNode cell_type="code" classes="cell"> + <CellInputNode classes="cell_input"> + <literal_block language="ipython3" xml:space="preserve"> + raise Exception('oopsie!') + <CellOutputNode classes="cell_output"> + <CellOutputBundleNode output_count="1"> diff --git a/tests/test_execute/test_basic_failing_auto.ipynb b/tests/test_execute/test_basic_failing_auto.ipynb new file mode 100644 index 00000000..51bac4e5 --- /dev/null +++ b/tests/test_execute/test_basic_failing_auto.ipynb @@ -0,0 +1,56 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# a title\n", + "\n", + "some text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "Exception", + "evalue": "oopsie!", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mException\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-1-714b2b556897>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'oopsie!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mException\u001b[0m: oopsie!" + ] + } + ], + "source": [ + "raise Exception('oopsie!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.1" + }, + "test_name": "notebook1" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tests/test_execute/test_basic_failing_auto.xml b/tests/test_execute/test_basic_failing_auto.xml new file mode 100644 index 00000000..9dd3813e --- /dev/null +++ b/tests/test_execute/test_basic_failing_auto.xml @@ -0,0 +1,12 @@ +<document source="basic_failing"> + <section ids="a-title" names="a\ title"> + <title> + a title + <paragraph> + some text + <CellNode cell_type="code" classes="cell"> + <CellInputNode classes="cell_input"> + <literal_block language="ipython3" xml:space="preserve"> + raise Exception('oopsie!') + <CellOutputNode classes="cell_output"> + <CellOutputBundleNode output_count="1"> diff --git a/tests/test_execute/test_basic_failing.ipynb b/tests/test_execute/test_basic_failing_cache.ipynb similarity index 100% rename from tests/test_execute/test_basic_failing.ipynb rename to tests/test_execute/test_basic_failing_cache.ipynb diff --git a/tests/test_execute/test_basic_failing.xml b/tests/test_execute/test_basic_failing_cache.xml similarity index 100% rename from tests/test_execute/test_basic_failing.xml rename to tests/test_execute/test_basic_failing_cache.xml diff --git a/tests/test_execute/test_basic_unrun_nbclient.ipynb b/tests/test_execute/test_basic_unrun_auto.ipynb similarity index 100% rename from tests/test_execute/test_basic_unrun_nbclient.ipynb rename to tests/test_execute/test_basic_unrun_auto.ipynb diff --git a/tests/test_execute/test_basic_unrun.xml b/tests/test_execute/test_basic_unrun_auto.xml similarity index 100% rename from tests/test_execute/test_basic_unrun.xml rename to tests/test_execute/test_basic_unrun_auto.xml diff --git a/tests/test_execute/test_basic_unrun.ipynb b/tests/test_execute/test_basic_unrun_cache.ipynb similarity index 100% rename from tests/test_execute/test_basic_unrun.ipynb rename to tests/test_execute/test_basic_unrun_cache.ipynb diff --git a/tests/test_execute/test_basic_unrun_nbclient.xml b/tests/test_execute/test_basic_unrun_cache.xml similarity index 100% rename from tests/test_execute/test_basic_unrun_nbclient.xml rename to tests/test_execute/test_basic_unrun_cache.xml diff --git a/tests/test_execute/test_complex_outputs_unrun_nbclient.ipynb b/tests/test_execute/test_complex_outputs_unrun_auto.ipynb similarity index 100% rename from tests/test_execute/test_complex_outputs_unrun_nbclient.ipynb rename to tests/test_execute/test_complex_outputs_unrun_auto.ipynb diff --git a/tests/test_execute/test_complex_outputs_unrun_nbclient.xml b/tests/test_execute/test_complex_outputs_unrun_auto.xml similarity index 100% rename from tests/test_execute/test_complex_outputs_unrun_nbclient.xml rename to tests/test_execute/test_complex_outputs_unrun_auto.xml diff --git a/tests/test_execute/test_complex_outputs_unrun.ipynb b/tests/test_execute/test_complex_outputs_unrun_cache.ipynb similarity index 100% rename from 
tests/test_execute/test_complex_outputs_unrun.ipynb rename to tests/test_execute/test_complex_outputs_unrun_cache.ipynb diff --git a/tests/test_execute/test_complex_outputs_unrun.xml b/tests/test_execute/test_complex_outputs_unrun_cache.xml similarity index 100% rename from tests/test_execute/test_complex_outputs_unrun.xml rename to tests/test_execute/test_complex_outputs_unrun_cache.xml From e8dad541f8b830864f428e46b5738c1cbc143a3c Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 04:00:58 +0100 Subject: [PATCH 2/7] =?UTF-8?q?=F0=9F=A7=AA=20TEST:=20Add=20auto=20matplot?= =?UTF-8?q?lib=20install?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This import is to mitigate errors on CI VMs, where you can get the message: "Matplotlib is building the font cache" --- tests/conftest.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index ca34e224..388f0ab1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -24,6 +24,16 @@ TEST_FILE_DIR = Path(__file__).parent.joinpath("notebooks") +@pytest.fixture(autouse=True, scope="session") +def build_matplotlib_font_cache(): + """This is to mitigate errors on CI VMs, where you can get the message: + "Matplotlib is building the font cache" in output notebooks + """ + from matplotlib.font_manager import FontManager + + FontManager() + + @pytest.fixture() def get_test_path(): def _get_test_path(name): From f7260f4f99ac4eea97d30ac8e7a9d9981b924416 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 04:40:16 +0100 Subject: [PATCH 3/7] =?UTF-8?q?=E2=9C=A8=20NEW:=20Capture=20execution=20da?= =?UTF-8?q?ta=20in=20sphinx=20env?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- myst_nb/__init__.py | 1 + myst_nb/cache.py | 27 ++++++++++++++++++++++++++- tests/test_execute.py | 22 ++++++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index e13ab6e8..d36993a4 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -180,6 +180,7 @@ def set_valid_execution_paths(app): for suffix, parser_type in app.config["source_suffix"].items() if parser_type in ("myst-nb",) } + app.env.nb_execution_data = {} def add_exclude_patterns(app, config): diff --git a/myst_nb/cache.py b/myst_nb/cache.py index db91d188..fb9f5e0e 100644 --- a/myst_nb/cache.py +++ b/myst_nb/cache.py @@ -9,6 +9,7 @@ or if 'auto' / 'force' is set, will execute the notebook. """ +import datetime import os import tempfile from typing import List, Optional, Set @@ -145,14 +146,24 @@ def generate_notebook_outputs( ntbk = result.nb + env.nb_execution_data[env.docname] = { + "mtime": datetime.datetime.utcnow().isoformat(), + "runtime": result.time, + "method": execution_method, + "succeeded": False if result.err else True, + } + return ntbk cache_base = get_cache(path_to_cache) # Use relpath here in case Sphinx is building from a non-parent folder r_file_path = Path(os.path.relpath(file_path, Path().resolve())) + runtime = None + succeeded = False + try: - _, ntbk = cache_base.merge_match_into_notebook(ntbk) + pk, ntbk = cache_base.merge_match_into_notebook(ntbk) except KeyError: message = ( f"Couldn't find cache key for notebook file {str(r_file_path)}. 
" @@ -182,6 +193,20 @@ def generate_notebook_outputs( ntbk.metadata["language_info"] = nbf.from_dict({"file_extension": ".txt"}) else: LOGGER.verbose("Merged cached outputs into %s", str(r_file_path)) + succeeded = True + try: + runtime = cache_base.get_cache_record(pk).data.get( + "execution_seconds", None + ) + except Exception: + pass + + env.nb_execution_data[env.docname] = { + "mtime": datetime.datetime.utcnow().isoformat(), + "runtime": runtime, + "method": execution_method, + "succeeded": succeeded, + } return ntbk diff --git a/tests/test_execute.py b/tests/test_execute.py index bb8ff0af..b7b94ba4 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -12,6 +12,13 @@ def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + # Test execution statistics, should look like: + # {'basic_unrun': {'mtime': '2020-08-20T03:32:27.061454', 'runtime': 0.964572671, + # 'method': 'auto', 'succeeded': True}} + assert "basic_unrun" in sphinx_run.env.nb_execution_data + assert sphinx_run.env.nb_execution_data["basic_unrun"]["method"] == "auto" + assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True + @pytest.mark.sphinx_params( "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} @@ -24,6 +31,13 @@ def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb") file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + # Test execution statistics, should look like: + # {'basic_unrun': {'mtime': '2020-08-20T03:32:27.061454', 'runtime': 0.964572671, + # 'method': 'cache', 'succeeded': True}} + assert "basic_unrun" in sphinx_run.env.nb_execution_data + assert sphinx_run.env.nb_execution_data["basic_unrun"]["method"] == "cache" + assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True + @pytest.mark.sphinx_params( "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} @@ -79,6 +93,10 @@ def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") sphinx_run.get_report_file() + assert "basic_failing" in sphinx_run.env.nb_execution_data + assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "cache" + assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False + @pytest.mark.sphinx_params( "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "auto"} @@ -92,6 +110,10 @@ def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") sphinx_run.get_report_file() + assert "basic_failing" in sphinx_run.env.nb_execution_data + assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "auto" + assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False + @pytest.mark.sphinx_params( "basic_failing.ipynb", From 8534c2c87ebae52c08a64fd7ee59772ced4603c7 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 05:44:31 +0100 Subject: [PATCH 4/7] =?UTF-8?q?=F0=9F=91=8C=20IMPROVE:=20Store=20error=20l?= =?UTF-8?q?og=20path=20in=20`env.nb=5Fexecution=5Fdata`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- myst_nb/cache.py | 14 +++++++++++--- 
tests/test_execute.py | 2 ++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/myst_nb/cache.py b/myst_nb/cache.py index fb9f5e0e..e8e22717 100644 --- a/myst_nb/cache.py +++ b/myst_nb/cache.py @@ -134,8 +134,9 @@ def generate_notebook_outputs( allow_errors=env.config["execution_allow_errors"], ) + report_path = None if result.err: - message = _report_exec_fail( + report_path, message = _report_exec_fail( env, Path(file_path).name, result.exc_string, @@ -152,6 +153,8 @@ def generate_notebook_outputs( "method": execution_method, "succeeded": False if result.err else True, } + if report_path: + env.nb_execution_data[env.docname]["error_log"] = report_path return ntbk @@ -159,8 +162,10 @@ def generate_notebook_outputs( # Use relpath here in case Sphinx is building from a non-parent folder r_file_path = Path(os.path.relpath(file_path, Path().resolve())) + # default execution data runtime = None succeeded = False + report_path = None try: pk, ntbk = cache_base.merge_match_into_notebook(ntbk) @@ -174,13 +179,14 @@ def generate_notebook_outputs( except KeyError: stage_record = None if stage_record and stage_record.traceback: - message += _report_exec_fail( + report_path, suffix = _report_exec_fail( env, r_file_path.name, stage_record.traceback, show_traceback, "\n Last execution failed with traceback saved in {}", ) + message += suffix LOGGER.error(message) @@ -207,6 +213,8 @@ def generate_notebook_outputs( "method": execution_method, "succeeded": succeeded, } + if report_path: + env.nb_execution_data[env.docname]["error_log"] = report_path return ntbk @@ -233,7 +241,7 @@ def _report_exec_fail( message = template.format(full_path) if show_traceback: message += "\n" + traceback - return message + return str(full_path), message def _stage_and_execute( diff --git a/tests/test_execute.py b/tests/test_execute.py index b7b94ba4..eb2e00b2 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -96,6 +96,7 @@ def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): assert "basic_failing" in sphinx_run.env.nb_execution_data assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "cache" assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False + assert "error_log" in sphinx_run.env.nb_execution_data["basic_failing"] @pytest.mark.sphinx_params( @@ -113,6 +114,7 @@ def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): assert "basic_failing" in sphinx_run.env.nb_execution_data assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "auto" assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False + assert "error_log" in sphinx_run.env.nb_execution_data["basic_failing"] @pytest.mark.sphinx_params( From 126909d91fc6b5d85cdd043a63874254e7a3cfbe Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 09:13:51 +0100 Subject: [PATCH 5/7] =?UTF-8?q?=E2=9C=A8=20NEW:=20Add=20`nb-exec-table`=20?= =?UTF-8?q?directive?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/examples/index.md | 3 ++ myst_nb/__init__.py | 7 +++- myst_nb/cache.py | 7 ++-- myst_nb/exec_table.py | 93 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 4 deletions(-) create mode 100644 myst_nb/exec_table.py diff --git a/docs/examples/index.md b/docs/examples/index.md index eae93bb5..84d107c3 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -3,6 +3,9 @@ This section is to collect examples that others 
can use for inspiration and reference. +```{nb-exec-table} +``` + ```{toctree} basic coconut-lang diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index d36993a4..4f121c93 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -35,6 +35,7 @@ PasteInlineNode, ) from .nb_glue.transform import PasteNodesToDocutils +from .exec_table import setup_exec_table LOGGER = logging.getLogger(__name__) @@ -132,6 +133,9 @@ def visit_element_html(self, node): app.add_domain(NbGlueDomain) app.add_directive("code-cell", CodeCell) + # execution statistics table + setup_exec_table(app) + # TODO need to deal with key clashes in NbGlueDomain.merge_domaindata # before this is parallel_read_safe return {"version": __version__, "parallel_read_safe": False} @@ -180,7 +184,8 @@ def set_valid_execution_paths(app): for suffix, parser_type in app.config["source_suffix"].items() if parser_type in ("myst-nb",) } - app.env.nb_execution_data = {} + if not hasattr(app.env, "nb_execution_data"): + app.env.nb_execution_data = {} def add_exclude_patterns(app, config): diff --git a/myst_nb/cache.py b/myst_nb/cache.py index e8e22717..d976186d 100644 --- a/myst_nb/cache.py +++ b/myst_nb/cache.py @@ -9,7 +9,7 @@ or if 'auto' / 'force' is set, will execute the notebook. """ -import datetime +from datetime import datetime import os import tempfile from typing import List, Optional, Set @@ -57,6 +57,7 @@ def update_execution_cache( cache_base = get_cache(app.env.nb_path_to_cache) for path in removed: + app.env.nb_execution_data.pop(path, None) docpath = app.env.doc2path(path) # there is an issue in sphinx doc2path, whereby if the path does not # exist then it will be assigned the default source_suffix (usually .rst) @@ -148,7 +149,7 @@ def generate_notebook_outputs( ntbk = result.nb env.nb_execution_data[env.docname] = { - "mtime": datetime.datetime.utcnow().isoformat(), + "mtime": datetime.utcnow().timestamp(), "runtime": result.time, "method": execution_method, "succeeded": False if result.err else True, @@ -208,7 +209,7 @@ def generate_notebook_outputs( pass env.nb_execution_data[env.docname] = { - "mtime": datetime.datetime.utcnow().isoformat(), + "mtime": datetime.utcnow().timestamp(), "runtime": runtime, "method": execution_method, "succeeded": succeeded, diff --git a/myst_nb/exec_table.py b/myst_nb/exec_table.py new file mode 100644 index 00000000..4da50eff --- /dev/null +++ b/myst_nb/exec_table.py @@ -0,0 +1,93 @@ +"""A directive to create a table of executed notebooks, and related statistics.""" +from datetime import datetime + +from docutils import nodes +from sphinx.transforms.post_transforms import SphinxPostTransform +from sphinx.util.docutils import SphinxDirective + + +def setup_exec_table(app): + """execution statistics table.""" + app.add_node(ExecutionStatsNode) + app.add_directive("nb-exec-table", ExecutionStatsTable) + app.add_post_transform(ExecutionStatsPostTransform) + + +class ExecutionStatsNode(nodes.General, nodes.Element): + """A placeholder node, for adding a notebook execution statistics table.""" + + +class ExecutionStatsTable(SphinxDirective): + """Add a notebook execution statistics table.""" + + has_content = True + final_argument_whitespace = True + + def run(self): + + return [ExecutionStatsNode()] + + +class ExecutionStatsPostTransform(SphinxPostTransform): + """Replace the placeholder node with the final table.""" + + default_priority = 400 + + def run(self, **kwargs) -> None: + for node in self.document.traverse(ExecutionStatsNode): + 
node.replace_self(make_stat_table(self.env.nb_execution_data)) + + +def make_stat_table(nb_execution_data): + + key2header = { + "mtime": "Modified", + "method": "Method", + "runtime": "Run Time (s)", + "succeeded": "Status", + } + + key2transform = { + "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M"), + "method": str, + "runtime": lambda x: str(round(x, 2)), + "succeeded": lambda x: "✅" if x else "❌", + } + + # top-level element + table = nodes.table() + table["classes"] += ["colwidths-auto"] + # self.set_source_info(table) + + # column settings element + ncols = len(key2header) + 1 + tgroup = nodes.tgroup(cols=ncols) + table += tgroup + colwidths = [round(100 / ncols, 2)] * ncols + for colwidth in colwidths: + colspec = nodes.colspec(colwidth=colwidth) + tgroup += colspec + + # header + thead = nodes.thead() + tgroup += thead + row = nodes.row() + thead += row + + for name in ["Document"] + list(key2header.values()): + row.append(nodes.entry("", nodes.paragraph(text=name))) + + # body + tbody = nodes.tbody() + tgroup += tbody + + for doc, data in nb_execution_data.items(): + row = nodes.row() + tbody += row + row.append(nodes.entry("", nodes.paragraph(text=doc))) + for name in key2header.keys(): + row.append( + nodes.entry("", nodes.paragraph(text=key2transform[name](data[name]))) + ) + + return table From 983d23f87947556cd5d70cae013a9696182b20c3 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 09:58:28 +0100 Subject: [PATCH 6/7] =?UTF-8?q?=F0=9F=A7=AA=20TEST:=20add=20test=20for=20`?= =?UTF-8?q?nb-exec-table`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- myst_nb/exec_table.py | 13 +++++++------ tests/notebooks/nb_exec_table.md | 20 ++++++++++++++++++++ tests/test_execute.py | 13 +++++++++++++ tests/test_execute/test_nb_exec_table.xml | 10 ++++++++++ 4 files changed, 50 insertions(+), 6 deletions(-) create mode 100644 tests/notebooks/nb_exec_table.md create mode 100644 tests/test_execute/test_nb_exec_table.xml diff --git a/myst_nb/exec_table.py b/myst_nb/exec_table.py index 4da50eff..e79f2faf 100644 --- a/myst_nb/exec_table.py +++ b/myst_nb/exec_table.py @@ -48,10 +48,12 @@ def make_stat_table(nb_execution_data): } key2transform = { - "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M"), + "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M") + if x + else "", "method": str, - "runtime": lambda x: str(round(x, 2)), - "succeeded": lambda x: "✅" if x else "❌", + "runtime": lambda x: "-" if x is None else str(round(x, 2)), + "succeeded": lambda x: "✅" if x is True else "❌", } # top-level element @@ -86,8 +88,7 @@ def make_stat_table(nb_execution_data): tbody += row row.append(nodes.entry("", nodes.paragraph(text=doc))) for name in key2header.keys(): - row.append( - nodes.entry("", nodes.paragraph(text=key2transform[name](data[name]))) - ) + text = key2transform[name](data[name]) + row.append(nodes.entry("", nodes.paragraph(text=text))) return table diff --git a/tests/notebooks/nb_exec_table.md b/tests/notebooks/nb_exec_table.md new file mode 100644 index 00000000..0242ad26 --- /dev/null +++ b/tests/notebooks/nb_exec_table.md @@ -0,0 +1,20 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: '0.8' + jupytext_version: 1.4.1+dev +kernelspec: + display_name: Python 3 + language: python + name: python3 +author: Chris +--- + +# Test the `nb-exec-table` directive + +This directive should generate a 
table of executed notebook statistics. + +```{nb-exec-table} +``` diff --git a/tests/test_execute.py b/tests/test_execute.py index eb2e00b2..debc0822 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -244,3 +244,16 @@ def test_execution_metadata_timeout(sphinx_run, file_regression, check_nbs): """ notebook timeout metadata has higher preference then execution_timeout config""" sphinx_run.build() assert "execution failed" in sphinx_run.warnings() + + +@pytest.mark.sphinx_params( + "nb_exec_table.md", conf={"jupyter_execute_notebooks": "auto"}, +) +def test_nb_exec_table(sphinx_run, file_regression, check_nbs): + """Test that the table gets output into the HTML, + including a row for the executed notebook. + """ + sphinx_run.build() + assert not sphinx_run.warnings() + file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml") + assert '<tr class="row-even"><td><p>nb_exec_table</p></td>' in sphinx_run.get_html() diff --git a/tests/test_execute/test_nb_exec_table.xml b/tests/test_execute/test_nb_exec_table.xml new file mode 100644 index 00000000..73697e23 --- /dev/null +++ b/tests/test_execute/test_nb_exec_table.xml @@ -0,0 +1,10 @@ +<document source="nb_exec_table"> + <section ids="test-the-nb-exec-table-directive" names="test\ the\ nb-exec-table\ directive"> + <title> + Test the + <literal> + nb-exec-table + directive + <paragraph> + This directive should generate a table of executed notebook statistics. + <ExecutionStatsNode> From d18638978299f17a25f5f409e4323c948eada639 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 20 Aug 2020 10:50:05 +0100 Subject: [PATCH 7/7] =?UTF-8?q?=F0=9F=93=9A=20DOCS:=20Document=20new=20exe?= =?UTF-8?q?cution=20features?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/examples/index.md | 3 - docs/use/execute.md | 130 +++++++++++++++++------------ docs/use/start.md | 9 +- myst_nb/__init__.py | 4 +- myst_nb/{cache.py => execution.py} | 6 +- myst_nb/parser.py | 2 +- 6 files changed, 92 insertions(+), 62 deletions(-) rename myst_nb/{cache.py => execution.py} (97%) diff --git a/docs/examples/index.md b/docs/examples/index.md index 84d107c3..eae93bb5 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -3,9 +3,6 @@ This section is to collect examples that others can use for inspiration and reference. -```{nb-exec-table} -``` - ```{toctree} basic coconut-lang diff --git a/docs/use/execute.md b/docs/use/execute.md index 125bb262..1f297946 100644 --- a/docs/use/execute.md +++ b/docs/use/execute.md @@ -16,104 +16,100 @@ kernelspec: # Executing and cacheing your content MyST-NB can automatically run and cache notebooks contained in your project using [jupyter-cache]. -Notebooks can either be run each time the documentation is built, or cached -locally so that re-runs occur only when code cells have changed. +Notebooks can either be run each time the documentation is built, or cached locally so that re-runs occur only when code cells have changed. -Cacheing behavior is controlled with configuration in your `conf.py` file. See -the sections below for each configuration option and its effect. +Caching behaviour is controlled with configuration in your `conf.py` file. +See the sections below for each configuration option and its effect. 
(execute/config)= ## Triggering notebook execution -To trigger the execution of notebook pages, use the following configuration in `conf.py` +To trigger the execution of notebook pages, use the following configuration in `conf.py`: -``` +```python jupyter_execute_notebooks = "auto" ``` -By default, this will only execute notebooks that are missing at least one output. If -a notebook has *all* of its outputs populated, then it will not be executed. +By default, this will only execute notebooks that are missing at least one output. +If a notebook has *all* of its outputs populated, then it will not be executed. -**To force the execution of all notebooks, regardless of their outputs**, change the -above configuration value to: +**To force the execution of all notebooks, regardless of their outputs**, change the above configuration value to: -``` +```python jupyter_execute_notebooks = "force" ``` -**To cache execution outputs with [jupyter-cache]**, change the above configuration -value to: +**To cache execution outputs with [jupyter-cache]**, change the above configuration value to: -``` +```python jupyter_execute_notebooks = "cache" ``` See {ref}`execute/cache` for more information. -**To turn off notebook execution**, change the -above configuration value to: +**To turn off notebook execution**, change the above configuration value to: -``` +```python jupyter_execute_notebooks = "off" ``` -**To exclude certain file patterns from execution**, use the following -configuration: +**To exclude certain file patterns from execution**, use the following configuration: -``` +```python execution_excludepatterns = ['list', 'of', '*patterns'] ``` -Any file that matches one of the items in `execution_excludepatterns` will not be -executed. +Any file that matches one of the items in `execution_excludepatterns` will not be executed. (execute/cache)= ## Cacheing the notebook execution -As mentioned above, you can **cache the results of executing a notebook page** by setting +As mentioned above, you can **cache the results of executing a notebook page** by setting: -``` +```python jupyter_execute_notebooks = "cache" ``` -in your conf.py file. In this case, when a page is executed, its outputs -will be stored in a local database. This allows you to be sure that the -outputs in your documentation are up-to-date, while saving time avoiding -unnecessary re-execution. It also allows you to store your `.ipynb` files in -your `git` repository *without their outputs*, but still leverage a cache to -save time when building your site. +in your conf.py file. + +In this case, when a page is executed, its outputs will be stored in a local database. +This allows you to be sure that the outputs in your documentation are up-to-date, while saving time avoiding unnecessary re-execution. +It also allows you to store your `.ipynb` files (or their `.md` equivalent) in your `git` repository *without their outputs*, but still leverage a cache to save time when building your site. When you re-build your site, the following will happen: -* Notebooks that have not seen changes to their **code cells** since the last build - will not be re-executed. Instead, their outputs will be pulled from the cache - and inserted into your site. -* Notebooks that **have any change to their code cells** will be re-executed, and the - cache will be updated with the new outputs. +* Notebooks that have not seen changes to their **code cells** or **metadata** since the last build will not be re-executed. 
+ Instead, their outputs will be pulled from the cache and inserted into your site. +* Notebooks that **have any change to their code cells** will be re-executed, and the cache will be updated with the new outputs. -By default, the cache will be placed in the parent of your build folder. Generally, -this is in `_build/.jupyter_cache`. +By default, the cache will be placed in the parent of your build folder. +Generally, this is in `_build/.jupyter_cache`. You may also specify a path to the location of a jupyter cache you'd like to use: -``` -jupyter_cache = path/to/mycache +```python +jupyter_cache = "path/to/mycache" ``` -The path should point to an **empty folder**, or a folder where a -**jupyter cache already exists**. +The path should point to an **empty folder**, or a folder where a **jupyter cache already exists**. [jupyter-cache]: https://github.com/executablebooks/jupyter-cache "the Jupyter Cache Project" +## Executing in temporary folders + +By default, the command working directory (cwd) that a notebook runs in will be its parent directory. +However, you can set `execution_in_temp=True` in your `conf.py`, to change this behaviour such that, for each execution, a temporary directory will be created and used as the cwd. + (execute/timeout)= ## Execution Timeout The execution of notebooks is managed by {doc}`nbclient <nbclient:client>`. -The `execution_timeout` sphinx option defines the maximum time (in seconds) each notebook cell is allowed to run, if the execution takes longer an exception will be raised. +The `execution_timeout` sphinx option defines the maximum time (in seconds) each notebook cell is allowed to run. +if the execution takes longer an exception will be raised. The default is 30 s, so in cases of long-running cells you may want to specify an higher value. -The timeout option can also be set to None or -1 to remove any restriction on execution time. +The timeout option can also be set to `None` or -1 to remove any restriction on execution time. This global value can also be overridden per notebook by adding this to you notebooks metadata: @@ -126,19 +122,32 @@ This global value can also be overridden per notebook by adding this to you note } ``` -## Execution FAQs +(execute/allow_errors)= +## Dealing with code that raises errors -### How can I include code that raises errors? +In some cases, you may want to intentionally show code that doesn't work (e.g., to show the error message). +You can achieve this at "three levels": -In some cases, you may want to intentionally show code that doesn't work (e.g., to show -the error message). To do this, add a `raises-exception` tag to your code cell. This -can be done via a Jupyter interface, or via the `{code-cell}` directive like so: +Globally, by setting `execution_allow_errors=True` in your `conf.py`. -```` +Per notebook (overrides global), by adding this to you notebooks metadata: + +```json +{ +"metadata": { + "execution": { + "allow_errors": true + } +} +``` + +Per cell, by adding a `raises-exception` tag to your code cell. 
+This can be done via a Jupyter interface, or via the `{code-cell}` directive like so: + +````md ```{code-cell} ---- -tags: [raises-exception] ---- +:tags: [raises-exception] + print(thisvariabledoesntexist) ``` ```` @@ -151,3 +160,20 @@ tags: [raises-exception] --- print(thisvariabledoesntexist) ``` + +(execute/statistics)= +## Execution Statistics + +As notebooks are executed, certain statistics are stored in a dictionary (`{docname:data}`), and saved on the [sphinx environment object](https://www.sphinx-doc.org/en/master/extdev/envapi.html#sphinx.environment.BuildEnvironment) as `env.nb_execution_data`. + +You can access this in a post-transform in your own sphinx extensions, or use the built-in `nb-exec-table` directive: + +````md +```{nb-exec-table} +``` +```` + +which produces: + +```{nb-exec-table} +``` diff --git a/docs/use/start.md b/docs/use/start.md index 04f5d336..3e54f555 100644 --- a/docs/use/start.md +++ b/docs/use/start.md @@ -52,10 +52,17 @@ MyST-NB then adds some additional configuration, specific to notebooks: * - `jupyter_execute_notebooks` - "auto" - The logic for executing notebooks, [see here](execute/config) for details. +* - `execution_in_temp` + - `False` + - If `True`, then a temporary directory will be created and used as the command working directory (cwd), if `False` then the notebook's parent directory will be the cwd. +* - `execution_allow_errors` + - `False` + - If `False`, when a code cell raises an error the execution is stopped, if `True` then all cells are always run. + This can also be overridden by metadata in a notebook, [see here](execute/allow_errors) for details. * - `execution_timeout` - 30 - The maximum time (in seconds) each notebook cell is allowed to run. - This can be overridden by metadata in a notebook, [see here](execute/timeout) for detail. + This can also be overridden by metadata in a notebook, [see here](execute/timeout) for details. * - `execution_show_tb` - `False` - Show failed notebook tracebacks in stdout (in addition to writing to file). diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index 4f121c93..2cbb8f16 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -16,7 +16,7 @@ JupyterCell, ) -from .cache import update_execution_cache +from .execution import update_execution_cache from .parser import ( NotebookParser, CellNode, @@ -106,7 +106,7 @@ def visit_element_html(self, node): app.add_config_value("jupyter_execute_notebooks", "auto", "env") app.add_config_value("execution_timeout", 30, "env") app.add_config_value("execution_allow_errors", False, "env") - app.add_config_value("execution_in_temp", True, "env") + app.add_config_value("execution_in_temp", False, "env") # show traceback in stdout (in addition to writing to file) # this is useful in e.g. RTD where one cannot inspect a file app.add_config_value("execution_show_tb", False, "") diff --git a/myst_nb/cache.py b/myst_nb/execution.py similarity index 97% rename from myst_nb/cache.py rename to myst_nb/execution.py index d976186d..d6104693 100644 --- a/myst_nb/cache.py +++ b/myst_nb/execution.py @@ -109,7 +109,7 @@ def generate_notebook_outputs( if not path_to_cache: - if execution_method == "auto" and is_nb_with_outputs(file_path): + if execution_method == "auto" and nb_has_all_output(file_path): LOGGER.info( "Did not execute %s. 
" "Set jupyter_execute_notebooks to `force` to execute", @@ -310,8 +310,8 @@ def execute_staged_nb( return result -def is_nb_with_outputs(source_path: str, nb_extensions: List[str] = ["ipynb"]) -> bool: - """Determine if the path contains a notebook with outputs.""" +def nb_has_all_output(source_path: str, nb_extensions: List[str] = ["ipynb"]) -> bool: + """Determine if the path contains a notebook with at least one output.""" has_outputs = False ext = os.path.splitext(source_path)[1] diff --git a/myst_nb/parser.py b/myst_nb/parser.py index 0c90d978..a86b0bdf 100644 --- a/myst_nb/parser.py +++ b/myst_nb/parser.py @@ -18,7 +18,7 @@ from myst_parser.sphinx_renderer import SphinxRenderer from myst_parser.sphinx_parser import MystParser -from myst_nb.cache import generate_notebook_outputs +from myst_nb.execution import generate_notebook_outputs from myst_nb.converter import string_to_notebook from myst_nb.nb_glue import GLUE_PREFIX from myst_nb.nb_glue.domain import NbGlueDomain