diff --git a/.github/workflows/requirements-pr.yml b/.github/workflows/requirements-pr.yml deleted file mode 100644 index 2b981747..00000000 --- a/.github/workflows/requirements-pr.yml +++ /dev/null @@ -1,82 +0,0 @@ -# cspell:ignore noreply - -name: Requirements (PR) - -on: - pull_request: - branches: [master] - -jobs: - upgrade: - name: Upgrade requirement files - runs-on: ubuntu-20.04 - strategy: - matrix: - python-version: - - 3.6 - - 3.7 - - 3.8 - steps: - - uses: actions/checkout@v2 - - run: git fetch origin - - name: Check for modified dependencies - id: git-diff - run: - echo "::set-output name=dependency-changes::$(git diff - origin/$GITHUB_BASE_REF --name-only -- reqs setup.cfg)" - # Also triggers on (unwanted) changes to requirements.txt files - - name: Show dependency changes changes - run: - git diff origin/$GITHUB_BASE_REF -- reqs/requirements*.in setup.cfg - - name: Abort if fork PR and dependency changes - if: - github.event.pull_request.head.repo.full_name != github.repository && - steps.git-diff.outputs.dependency-changes != '' - run: | - echo "::error::Cannot modify package dependencies through a PR from a fork" - exit 1 - - name: Set up Python ${{ matrix.python-version }} - if: steps.git-diff.outputs.dependency-changes != '' - uses: actions/setup-python@master - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - if: steps.git-diff.outputs.dependency-changes != '' - run: | - python -m pip install --upgrade pip - pip install pip-tools - - name: Upgrade dependencies - if: steps.git-diff.outputs.dependency-changes != '' - run: bash reqs/upgrade.sh - - uses: actions/upload-artifact@v2 - with: - name: ${{ matrix.python-version }} - path: reqs/${{ matrix.python-version }} - - push: - name: Push changes - runs-on: ubuntu-20.04 - needs: - - upgrade - steps: - - uses: actions/checkout@v2 - with: - token: ${{ secrets.PAT }} - - uses: actions/download-artifact@v2 - with: - path: reqs - - run: git status -s - - name: Commit and push changes - run: | - git remote set-url origin https://x-access-token:${{ secrets.PAT }}@github.com/${{ github.repository }} - git config --global user.name ${{ github.actor }} - git config --global user.email "${{ github.actor }}@users.noreply.github.com" - git checkout -b ${GITHUB_HEAD_REF} - FILES_TO_COMMIT="reqs/*/requirements*.txt" - if [[ $(git diff -- ${FILES_TO_COMMIT}) ]]; then - git add ${FILES_TO_COMMIT} - git commit -m "ci: upgrade developer dependencies (automatic)" - git config pull.rebase true - git pull origin ${GITHUB_HEAD_REF} - git push origin HEAD:${GITHUB_HEAD_REF} - fi diff --git a/.gitignore b/.gitignore index e54e4253..324e953f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ *.json *.npy *.pdf +*.pickle *.png *.svg *.v2 diff --git a/cspell.json b/cspell.json index 932adcff..39e58863 100644 --- a/cspell.json +++ b/cspell.json @@ -47,6 +47,7 @@ ], "language": "en-US", "words": [ + "backends", "blatt", "bottomness", "breit", @@ -72,6 +73,7 @@ "itertools", "jsonschema", "jupyter", + "lineshape", "mathbb", "matplotlib", "mkdir", @@ -97,6 +99,7 @@ "topness", "traceback", "unbinned", + "vectorize", "venv", "weisskopf", "xcode", @@ -104,10 +107,12 @@ ], "ignoreWords": [ "amplitf", + "arange", "atfi", "axvline", "cano", "celltoolbar", + "cmin", "codacy", "codecov", "codemirror", @@ -118,18 +123,24 @@ "dtype", "eval", "evalf", + "figsize", "fmin", "fval", "genindex", + "gridspec", "heli", "histtype", "iloc", + "imag", "iminuit", "indeterministic", "isort", "jupyterlab", "keras", "kernelspec", + 
"lambdification", + "lambdified", + "lambdify", "linestyle", "linkcheck", "linspace", @@ -148,6 +159,7 @@ "ncalls", "ndarray", "noqa", + "numba", "pandoc", "phasespace", "phsp", @@ -162,11 +174,18 @@ "pytestconfig", "rightarrow", "rtfd", + "scipy", "seealso", + "sharex", "subsys", + "tolist", "tqdm", + "unflatten", + "unflattened", "unnormalized", "vstack", - "xlabel" + "xlabel", + "xlim", + "ylim" ] } \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 7c5f4b8b..fe3bcd95 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -135,7 +135,7 @@ # Intersphinx settings intersphinx_mapping = { "expertsystem": ( - "https://pwa.readthedocs.io/projects/expertsystem/en/0.6.9", + "https://pwa.readthedocs.io/projects/expertsystem/en/sympy", None, ), "iminuit": ("https://iminuit.readthedocs.io/en/stable", None), @@ -146,6 +146,7 @@ "pwa": ("https://pwa.readthedocs.io", None), "pycompwa": ("https://compwa.github.io", None), "python": ("https://docs.python.org/3", None), + "sympy": ("https://docs.sympy.org/latest", None), "tensorflow": ( "https://www.tensorflow.org/api_docs/python", "tensorflow.inv", @@ -182,7 +183,7 @@ nb_render_priority["doctest"] = nb_render_priority["html"] jupyter_execute_notebooks = "off" -if "EXECUTE_NB" in os.environ: +if "EXECUTE_NB" in os.environ or "READTHEDOCS" in os.environ: print("\033[93;1mWill run Jupyter notebooks!\033[0m") jupyter_execute_notebooks = "force" diff --git a/docs/usage.ipynb b/docs/usage.ipynb new file mode 100644 index 00000000..3d65ee2c --- /dev/null +++ b/docs/usage.ipynb @@ -0,0 +1,459 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Usage" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following pages go through the usual workflow when using {mod}`tensorwaves`. The output in each of these steps is stored, so that they can be run separately. This page shows some of the highlights of the complete workflow." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{toctree}\n", + "---\n", + "maxdepth: 2\n", + "---\n", + "usage/1_create_model\n", + "usage/2_generate_data\n", + "usage/3_perform_fit\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create an amplitude model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{seealso}\n", + "\n", + "{doc}`usage/1_create_model`\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import expertsystem as es\n", + "import graphviz\n", + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "from expertsystem.amplitude.dynamics.builder import (\n", + " create_relativistic_breit_wigner_with_ff,\n", + ")\n", + "from tensorwaves.data.generate import generate_data, generate_phsp\n", + "from tensorwaves.estimator import SympyUnbinnedNLL\n", + "from tensorwaves.optimizer.callbacks import CSVSummary\n", + "from tensorwaves.optimizer.minuit import Minuit2\n", + "from tensorwaves.physics.amplitude import Intensity, SympyModel\n", + "from tensorwaves.physics.helicity_formalism.kinematics import (\n", + " HelicityKinematics,\n", + " ParticleReactionKinematicsInfo,\n", + " SubSystem,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = es.generate_transitions(\n", + " initial_state=(\"J/psi(1S)\", [-1, +1]),\n", + " final_state=[\"gamma\", \"pi0\", \"pi0\"],\n", + " allowed_intermediate_particles=[\"f(0)\"],\n", + " allowed_interaction_types=[\"strong\", \"em\"],\n", + " formalism_type=\"canonical-helicity\",\n", + ")\n", + "graphs = result.collapse_graphs()\n", + "dot = es.io.convert_to_dot(graphs)\n", + "graphviz.Source(dot)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_builder = es.amplitude.get_builder(result)\n", + "for name in result.get_intermediate_particles().names:\n", + " model_builder.set_dynamics(name, create_relativistic_breit_wigner_with_ff)\n", + "model = model_builder.generate()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-output" + ] + }, + "outputs": [], + "source": [ + "next(iter(model.components.values())).doit()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-output" + ] + }, + "outputs": [], + "source": [ + "sorted(model.parameters, key=lambda s: s.name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generate toy MC sample" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{seealso}\n", + "\n", + "{doc}`usage/2_generate_data`\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sympy_model = SympyModel(\n", + " expression=model.expression,\n", + " parameters={k: v.value for k, v in model.parameters.items()},\n", + " variables={},\n", + ")\n", + "intensity = Intensity(sympy_model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for state_id, particle in model.kinematics.final_state.items():\n", + " print(f\"ID {state_id}:\", particle.name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "kin = 
HelicityKinematics.from_model(model)\n", + "kin.register_subsystem(SubSystem([[3, 4], [2]], [], []))\n", + "kin.register_subsystem(SubSystem([[3], [4]], [2], []))\n", + "kin.register_invariant_mass([2, 4]);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "phsp_sample = generate_phsp(300_000, kin)\n", + "data_sample = generate_data(30_000, kin, intensity)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "phsp_set = kin.convert(phsp_sample)\n", + "data_set = kin.convert(data_sample)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "from matplotlib import cm\n", + "\n", + "intermediate_states = sorted(\n", + " (\n", + " p\n", + " for p in model.particles\n", + " if p not in model.kinematics.final_state.values()\n", + " and p not in model.kinematics.initial_state.values()\n", + " ),\n", + " key=lambda p: p.mass,\n", + ")\n", + "\n", + "evenly_spaced_interval = np.linspace(0, 1, len(intermediate_states))\n", + "colors = [cm.rainbow(x) for x in evenly_spaced_interval]\n", + "\n", + "\n", + "def indicate_masses():\n", + " plt.xlabel(\"$m$ [GeV]\")\n", + " for i, p in enumerate(intermediate_states):\n", + " plt.gca().axvline(\n", + " x=p.mass, linestyle=\"dotted\", label=p.name, color=colors[i]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_frame = pd.DataFrame(data_set)\n", + "data_frame[\"m_3+4\"].hist(bins=100, alpha=0.5, density=True)\n", + "indicate_masses()\n", + "plt.legend();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optimize the amplitude model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{seealso}\n", + "\n", + "{doc}`usage/3_perform_fit`\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "estimator = SympyUnbinnedNLL(sympy_model, data_set, phsp_set, backend=\"jax\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "\n", + "def compare_model(\n", + " variable_name,\n", + " data_set,\n", + " phsp_set,\n", + " intensity_model,\n", + " bins=150,\n", + "):\n", + " data = data_set[variable_name]\n", + " phsp = phsp_set[variable_name]\n", + " intensities = intensity_model(phsp_set)\n", + " plt.hist(data, bins=bins, alpha=0.5, label=\"data\", density=True)\n", + " plt.hist(\n", + " phsp,\n", + " weights=intensities,\n", + " bins=bins,\n", + " histtype=\"step\",\n", + " color=\"red\",\n", + " label=\"initial fit model\",\n", + " density=True,\n", + " )\n", + " indicate_masses()\n", + " plt.legend()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "initial_parameters = {\n", + " \"C[J/\\\\psi(1S) \\\\to f_{0}(1500)_{0} \\\\gamma_{+1};f_{0}(1500) \\\\to \\\\pi^{0}_{0} \\\\pi^{0}_{0}]\": 1.0\n", + " + 0.0j,\n", + " \"Gamma_f(0)(500)\": 0.3,\n", + " \"Gamma_f(0)(980)\": 0.1,\n", + " \"m_f(0)(1710)\": 1.75,\n", + " \"Gamma_f(0)(1710)\": 0.2,\n", + "}\n", + "intensity.update_parameters(initial_parameters)\n", + "compare_model(\"m_3+4\", data_set, phsp_set, intensity)\n", + 
"print(\"Number of free parameters:\", len(initial_parameters))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{tip}\n", + "Insert behavior into the {class}`.Optimizer` by defining a custom {class}`.Callback` class. Here's one that live updates a plot of the latest fit model!\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from IPython.display import clear_output\n", + "from tensorwaves.optimizer.callbacks import Callback, CallbackList\n", + "\n", + "\n", + "class PyplotCallback(Callback):\n", + " def __init__(self, step_size=10):\n", + " self.__step_size = step_size\n", + " self.__fig, self.__ax = plt.subplots(1, figsize=(8, 5))\n", + " self.__latest_parameters = {}\n", + "\n", + " def on_iteration_end(self, function_call, logs=None):\n", + " self.__latest_parameters = logs[\"parameters\"]\n", + " if function_call % self.__step_size != 0:\n", + " return\n", + " if \"READTHEDOCS\" in os.environ:\n", + " return\n", + " self.update_plot()\n", + " clear_output(wait=True)\n", + " display(plt.gcf())\n", + "\n", + " def on_function_call_end(self):\n", + " self.update_plot()\n", + "\n", + " def update_plot(self):\n", + " bins = 100\n", + " data = data_set[\"m_3+4\"]\n", + " phsp = phsp_set[\"m_3+4\"]\n", + " intensity.update_parameters(self.__latest_parameters)\n", + " intensities = intensity(phsp_set)\n", + " self.__ax.cla()\n", + " self.__ax.hist(data, bins=bins, alpha=0.5, label=\"data\", density=True)\n", + " self.__ax.hist(\n", + " phsp,\n", + " weights=intensities,\n", + " bins=bins,\n", + " histtype=\"step\",\n", + " color=\"red\",\n", + " label=\"fit model\",\n", + " density=True,\n", + " )\n", + " self.__ax.set_xlim((0.25, 2.5))\n", + " self.__ax.set_ylim((0, 1.9))\n", + " indicate_masses()\n", + " plt.gcf().legend()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "minuit2 = Minuit2(\n", + " callback=CallbackList(\n", + " [CSVSummary(\"traceback.csv\", step_size=2), PyplotCallback()]\n", + " )\n", + ")\n", + "fit_result = minuit2.optimize(estimator, initial_parameters)\n", + "fit_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fit_traceback = pd.read_csv(\"traceback.csv\")\n", + "fig, (ax1, ax2) = plt.subplots(\n", + " 2, figsize=(7, 9), sharex=True, gridspec_kw={\"height_ratios\": [1, 2]}\n", + ")\n", + "fit_traceback.plot(\"function_call\", \"estimator_value\", ax=ax1)\n", + "fit_traceback.plot(\"function_call\", list(initial_parameters), ax=ax2)\n", + "fig.tight_layout()\n", + "ax2.set_xlabel(\"function call\");" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/usage.md b/docs/usage.md deleted file mode 100644 index 21e8e077..00000000 --- a/docs/usage.md +++ /dev/null @@ -1,16 +0,0 @@ -# Usage - -The examples in the following pages illustrate how to use the tools that -{mod}`tensorwaves` offers. 
The examples were written as Jupyter notebooks and -can be downloaded -[here](https://github.com/ComPWA/tensorwaves/tree/master/docs/usage). - -```{toctree} ---- -maxdepth: 2 -glob: ---- -usage/1_create_model -usage/2_generate_data -usage/3_perform_fit -``` diff --git a/docs/usage/1_create_model.ipynb b/docs/usage/1_create_model.ipynb index 0f5dea4a..84ad2369 100644 --- a/docs/usage/1_create_model.ipynb +++ b/docs/usage/1_create_model.ipynb @@ -31,7 +31,7 @@ "---\n", "class: dropdown\n", "---\n", - "As {doc}`3_perform_fit` serves to illustrate usage only, we make the amplitude model here a bit simpler by not allowing $\\omega$ resonances (which are narrow and therefore hard to fit). For this reason, we can also limit the {class}`~expertsystem.reaction.default_settings.InteractionTypes` to {attr}`~expertsystem.reaction.default_settings.InteractionTypes.Strong`.\n", + "As {doc}`3_perform_fit` serves to illustrate usage only, we make the amplitude model here a bit simpler by not allowing $\\omega$ resonances (which are narrow and therefore hard to fit). For this reason, we can also limit the {class}`~expertsystem.reaction.default_settings.InteractionTypes` to {attr}`~expertsystem.reaction.default_settings.InteractionTypes.STRONG`.\n", "```" ] }, @@ -75,7 +75,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next we convert the {attr}`~expertsystem.reaction.Result.solutions` into an {class}`~expertsystem.amplitude.model.AmplitudeModel`. This can be done with the {func}`~expertsystem.generate_amplitudes` method." + "Next we convert the {attr}`~expertsystem.reaction.Result.transitions` into an {class}`~expertsystem.amplitude.helicity.HelicityModel`. This can be done with {func}`~expertsystem.amplitude.get_builder` and {meth}`~expertsystem.amplitude.helicity.HelicityAmplitudeBuilder.generate`." ] }, { @@ -84,7 +84,8 @@ "metadata": {}, "outputs": [], "source": [ - "model = es.generate_amplitudes(result)\n", + "model_builder = es.amplitude.get_builder(result)\n", + "model = model_builder.generate()\n", "list(model.parameters)" ] }, @@ -92,7 +93,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that we have to specify the dynamics for the resonances. We choose to use {class}`~expertsystem.amplitude.model.RelativisticBreitWigner` for all resonances:" + "The heart of the model is a SymPy expression that contains the full description of the intensity model. Note two things:\n", + "1. The coefficients for the different amplitudes are **complex** valued.\n", + "2. By default, there are no dynamics in the model; they still have to be specified.\n", + "\n", + "We choose to use {func}`~expertsystem.amplitude.dynamics.lineshape.relativistic_breit_wigner_with_ff` for all resonances. The {meth}`~expertsystem.amplitude.helicity.HelicityAmplitudeBuilder.set_dynamics` method is a convenience interface for replacing the dynamics for intermediate states."
] }, { @@ -101,16 +106,20 @@ "metadata": {}, "outputs": [], "source": [ + "from expertsystem.amplitude.dynamics.builder import (\n", + " create_relativistic_breit_wigner_with_ff,\n", + ")\n", + "\n", "for name in result.get_intermediate_particles().names:\n", - " model.dynamics.set_breit_wigner(name)\n", - "{name: type(dyn) for name, dyn in model.dynamics.items()}" + " model_builder.set_dynamics(name, create_relativistic_breit_wigner_with_ff)\n", + "model = model_builder.generate()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, we can write the {class}`~expertsystem.amplitude.model.AmplitudeModel`, using the {func}`~expertsystem.io.write` function:" + "Now let's take another look at the parameters of the model:" ] }, { @@ -119,14 +128,33 @@ "metadata": {}, "outputs": [], "source": [ - "es.io.write(model, \"amplitude_model_helicity.yml\")" + "sorted(model.parameters, key=lambda s: s.name)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we can write the {class}`~expertsystem.amplitude.helicity.HelicityModel` to file via {mod}`pickle`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pickle\n", + "\n", + "with open(\"helicity_model.pickle\", \"wb\") as model_file:\n", + " pickle.dump(model, model_file)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Cool, that's it! We now have a recipe for an amplitude model with which to {doc}`generate data <2_generate_data>` and {doc}`perform a fit <3_perform_fit>`! In the next steps, we will use use this {class}`~expertsystem.amplitude.model.AmplitudeModel` as a fit model template for {mod}`tensorwaves`." + "Cool, that's it! We now have a recipe for an amplitude model with which to {doc}`generate data <2_generate_data>` and {doc}`perform a fit <3_perform_fit>`! In the next steps, we will use this {class}`~expertsystem.amplitude.helicity.HelicityModel` as a fit model template for {mod}`tensorwaves`." ] } ], diff --git a/docs/usage/2_generate_data.ipynb b/docs/usage/2_generate_data.ipynb index f2a49558..7a9102c4 100644 --- a/docs/usage/2_generate_data.ipynb +++ b/docs/usage/2_generate_data.ipynb @@ -1,5 +1,21 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "%config Completer.use_jedi = False" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ @@ -11,9 +27,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this section, we will use the {class}`~expertsystem.amplitude.model.AmplitudeModel` that we created with the expert system in {doc}`the previous step <1_create_model>` to generate a data sample via hit & miss Monte Carlo. We do this with the {mod}`.data.generate` module.\n", + "In this section, we will use the {class}`~expertsystem.amplitude.helicity.HelicityModel` that we created with the expert system in [the previous step](1_create_model) to generate a data sample via hit & miss Monte Carlo. 
We do this with the {mod}`.data.generate` module.\n", "\n", - "First, we {func}`~.expertsystem.io.load` an {class}`~expertsystem.amplitude.model.AmplitudeModel` that was created in the previous step:" + "First, we {func}`~.expertsystem.io.load` an {class}`~expertsystem.amplitude.helicity.HelicityModel` that was created in the previous step:" ] }, { @@ -22,9 +38,22 @@ "metadata": {}, "outputs": [], "source": [ - "from expertsystem import io\n", + "# TODO: pickling of the HelicityModel does not work, so we have to currently redo all steps from before...\n", + "import expertsystem as es\n", + "from expertsystem.amplitude.dynamics.builder import (\n", + " create_relativistic_breit_wigner_with_ff,\n", + ")\n", "\n", - "model = io.load(\"amplitude_model_helicity.yml\")" + "result = es.generate_transitions(\n", + " initial_state=(\"J/psi(1S)\", [-1, +1]),\n", + " final_state=[\"gamma\", \"pi0\", \"pi0\"],\n", + " allowed_intermediate_particles=[\"f(0)\"],\n", + " allowed_interaction_types=\"strong and EM\",\n", + ")\n", + "model_builder = es.amplitude.get_builder(result)\n", + "for name in result.get_intermediate_particles().names:\n", + " model_builder.set_dynamics(name, create_relativistic_breit_wigner_with_ff)\n", + "model = model_builder.generate()" ] }, { @@ -38,7 +67,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An {class}`~expertsystem.amplitude.model.AmplitudeModel` defines the kinematics, the particles involved in the reaction, the dynamics used for the model on which to perform the eventual optimization, etc." + "A {class}`~expertsystem.amplitude.helicity.HelicityModel` defines the kinematics, the particles involved in the reaction, the dynamics used for the model on which to perform the eventual optimization, etc." ] }, { @@ -47,11 +76,23 @@ "metadata": {}, "outputs": [], "source": [ + "# TODO: this part will be changed once the kinematics is ported to the expertsystem!\n", "from tensorwaves.physics.helicity_formalism.kinematics import (\n", " HelicityKinematics,\n", + " ParticleReactionKinematicsInfo,\n", + " SubSystem,\n", ")\n", "\n", "kin = HelicityKinematics.from_model(model)\n", + "kin.register_subsystem(SubSystem([[3, 4], [2]], [], []))\n", + "kin.register_subsystem(SubSystem([[3], [4]], [2], []))\n", + "kin.register_invariant_mass([2, 4])\n", + "\n", + "import pickle\n", + "\n", + "with open(\"kinematics.pickle\", \"wb\") as kin_file:\n", + " pickle.dump(kin, kin_file)\n", + "\n", "print(\"Initial state mass:\", kin.reaction_kinematics_info.initial_state_masses)\n", "print(\"Final state masses:\", kin.reaction_kinematics_info.final_state_masses)\n", "print(\"Involved particles:\", model.particles.names)" @@ -110,9 +151,26 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "'Data samples' are more complicated than phase space samples in that they represent the intensity profile resulting from a reaction. You therefore need an {class}`.IntensityTF` object (or, more generally, a {class}`.Function` instance) and a phase space over which to generate that intensity distribution. We call such a data sample an **intensity-based sample**.\n", + "'Data samples' are more complicated than phase space samples in that they represent the intensity profile resulting from a reaction. You therefore need an {class}`.Intensity` object (or, more generally, a {class}`.Function` instance) and a phase space over which to generate that intensity distribution. 
We call such a data sample an **intensity-based sample**.\n", + "\n", + "An intensity-based sample is generated with the function {func}`.generate_data`. Its usage is similar to {func}`.generate_phsp`, but now you have to give an {obj}`.Intensity` in addition to the {obj}`.Kinematics` object. An {obj}`.Intensity` object can be created with the {class}`.SympyModel` class:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tensorwaves.physics.amplitude import Intensity, SympyModel\n", + "\n", + "sympy_model = SympyModel(\n", + " expression=model.expression,\n", + " parameters={k: v.value for k, v in model.parameters.items()},\n", + " variables={},\n", + ")\n", "\n", - "An intensity-based sample is generated with the function {func}`.generate_data`. Its usage is similar to {func}`.generate_phsp`, but now you have to give an {class}`.IntensityTF` in addition to the {class}`.Kinematics` object. An {class}`.IntensityTF` object can be created with the {class}`.IntensityBuilder` class:" + "intensity = Intensity(sympy_model)" ] }, { @@ -121,10 +179,10 @@ "metadata": {}, "outputs": [], "source": [ - "from tensorwaves.physics.helicity_formalism.amplitude import IntensityBuilder\n", + "import pickle\n", "\n", - "builder = IntensityBuilder(model.particles, kin)\n", - "intensity = builder.create_intensity(model)" + "with open(\"sympy_model.pickle\", \"wb\") as model_file:\n", + " pickle.dump(sympy_model, model_file)" ] }, { @@ -177,7 +235,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The data set is just a {obj}`dict` of kinematic variables (keys are the names, values is a list of computed values for each event). The numbers you see here are final state IDs as defined in the {class}`~expertsystem.amplitude.model.AmplitudeModel` member of the {class}`~expertsystem.amplitude.model.AmplitudeModel`:" + "The data set is just a {obj}`dict` of kinematic variables (keys are the names, values are the computed values for each event). The numbers you see here are final state IDs as defined in the kinematics member of the {class}`~expertsystem.amplitude.helicity.HelicityModel`:" ] }, { @@ -264,7 +322,7 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "np.sqrt(data_frame[\"mSq_3+4\"]).hist(bins=100, alpha=0.5, density=True)\n", + "data_frame[\"m_3+4\"].hist(bins=100, alpha=0.5, density=True)\n", "plt.xlabel(\"$m$ [GeV]\")\n", "for i, p in enumerate(intermediate_states):\n", " plt.axvline(x=p.mass, linestyle=\"dotted\", label=p.name, color=colors[i])\n", diff --git a/docs/usage/3_perform_fit.ipynb b/docs/usage/3_perform_fit.ipynb index 9457a514..2c7f5154 100644 --- a/docs/usage/3_perform_fit.ipynb +++ b/docs/usage/3_perform_fit.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As explained in the [previous step](2_generate_data), an {class}`.IntensityTF` object behaves just like a mathematical function that takes a set of data points as an argument and returns a list of intensities (real numbers). At this stage, we want to optimize the parameters of the intensity model, so that it matches the distribution of our data sample. This is what we call 'performing a fit'." + "As explained in the [previous step](2_generate_data), an {class}`.Intensity` object behaves just like a mathematical function that takes a set of data points as an argument and returns a list of intensities (real numbers). 
At this stage, we want to optimize the parameters of the intensity model, so that it matches the distribution of our data sample. This is what we call 'performing a fit'." ] }, { @@ -31,10 +31,7 @@ "source": [ "import numpy as np\n", "from expertsystem import io\n", - "from tensorwaves.physics.helicity_formalism.amplitude import IntensityBuilder\n", - "from tensorwaves.physics.helicity_formalism.kinematics import (\n", - " HelicityKinematics,\n", - ")" + "from tensorwaves.physics.amplitude import Intensity, SympyModel" ] }, { @@ -43,12 +40,18 @@ "metadata": {}, "outputs": [], "source": [ - "model = io.load(\"amplitude_model_helicity.yml\")\n", - "kinematics = HelicityKinematics.from_model(model)\n", + "import pickle\n", + "\n", + "with open(\"sympy_model.pickle\", \"rb\") as model_file:\n", + " model = pickle.load(model_file)\n", + "\n", + "intensity = Intensity(model)\n", + "\n", + "with open(\"kinematics.pickle\", \"rb\") as kin_file:\n", + " kinematics = pickle.load(kin_file)\n", + "\n", "data_sample = np.load(\"data_sample.npy\")\n", "phsp_sample = np.load(\"phsp_sample.npy\")\n", - "intensity_builder = IntensityBuilder(model.particles, kinematics)\n", - "intensity = intensity_builder.create_intensity(model)\n", "phsp_set = kinematics.convert(phsp_sample)\n", "data_set = kinematics.convert(data_sample)" ] @@ -75,16 +78,16 @@ "metadata": {}, "outputs": [], "source": [ - "from tensorwaves.estimator import UnbinnedNLL\n", + "from tensorwaves.estimator import SympyUnbinnedNLL\n", "\n", - "estimator = UnbinnedNLL(intensity, data_set, phsp_set)" + "estimator = SympyUnbinnedNLL(model, data_set, phsp_set, backend=\"jax\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The parameters that the {class}`.UnbinnedNLL` carries, have been taken from the {class}`.IntensityTF` object and you'll recognize them from the YAML recipe file that we created in [Step 1](1_create_model):" + "The parameters that the {class}`.SympyUnbinnedNLL` carries have been taken from the {class}`.Intensity` object and you'll recognize them from the model that we created in [Step 1](1_create_model):" ] }, { @@ -137,11 +140,12 @@ "outputs": [], "source": [ "initial_parameters = {\n", - " \"Phase_J/psi(1S)_to_f(0)(1500)_0+gamma_1;f(0)(1500)_to_pi0_0+pi0_0;\": 0.0,\n", - " \"Width_f(0)(500)\": 0.3,\n", - " \"Width_f(0)(980)\": 0.1,\n", - " \"Position_f(0)(1710)\": 1.75,\n", - " \"Width_f(0)(1710)\": 0.2,\n", + " \"C[J/\\\\psi(1S) \\\\to f_{0}(1500)_{0} \\\\gamma_{+1};f_{0}(1500) \\\\to \\\\pi^{0}_{0} \\\\pi^{0}_{0}]\": 1.0\n", + " + 0.0j,\n", + " \"Gamma_f(0)(500)\": 0.3,\n", + " \"Gamma_f(0)(980)\": 0.1,\n", + " \"m_f(0)(1710)\": 1.75,\n", + " \"Gamma_f(0)(1710)\": 0.2,\n", "}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Recall that an {class}`.IntensityTF` object is really just a {class}`.Function` that computes the intensity for a certain dataset. This can be seen now nicely when we use these intensities as weights on the phase space sample and plot it together with the original dataset. Here, we look at the invariant mass distribution projection of the final states `3` and `4`, which, {ref}`as we saw before `, are the final state particles $\\pi^0,\\pi^0$.\n", + "Recall that an {class}`.Intensity` object is really just a {class}`.Function` that computes the intensity for a certain dataset. This can now be seen nicely when we use these intensities as weights on the phase space sample and plot it together with the original dataset. 
Here, we look at the invariant mass distribution projection of the final states `3` and `4`, which, {ref}`as we saw before `, are the final state particles $\\pi^0,\\pi^0$.\n", "\n", - "Don't forget to use {meth}`~.IntensityTF.update_parameters` first!" + "Don't forget to use {meth}`~.Intensity.update_parameters` first!" ] }, { @@ -169,11 +173,10 @@ " data_set,\n", " phsp_set,\n", " intensity_model,\n", - " transform=lambda p: p,\n", " bins=100,\n", "):\n", - " data = transform(data_set[variable_name])\n", - " phsp = transform(phsp_set[variable_name])\n", + " data = data_set[variable_name]\n", + " phsp = phsp_set[variable_name]\n", " intensities = intensity_model(phsp_set)\n", " plt.hist(data, bins=bins, alpha=0.5, label=\"data\", density=True)\n", " plt.hist(\n", @@ -195,7 +198,7 @@ "outputs": [], "source": [ "intensity.update_parameters(initial_parameters)\n", - "compare_model(\"mSq_3+4\", data_set, phsp_set, intensity, np.sqrt)" + "compare_model(\"m_3+4\", data_set, phsp_set, intensity)" ] }, { @@ -237,7 +240,8 @@ " YAMLSummary(\"current_fit_result.yaml\"),\n", " CSVSummary(\"fit_traceback.csv\", step_size=2),\n", " ]\n", - " )\n", + " ),\n", + " use_analytic_gradient=False, # this is not yet working reliably\n", ")\n", "result = minuit2.optimize(estimator, initial_parameters)\n", "result" ] }, { @@ -340,7 +344,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Using the same method as above, we renew the parameters of the {class}`.IntensityTF` and plot it again over the data distribution." + "Using the same method as above, we renew the parameters of the {class}`.Intensity` and plot it again over the data distribution." ] }, { @@ -350,7 +354,7 @@ "outputs": [], "source": [ "intensity.update_parameters(latest_parameters)\n", - "compare_model(\"mSq_3+4\", data_set, phsp_set, intensity, np.sqrt)" + "compare_model(\"m_3+4\", data_set, phsp_set, intensity)" ] }, { @@ -429,14 +433,7 @@ "metadata": {}, "outputs": [], "source": [ - "fit_traceback.plot(\n", - " \"function_call\",\n", - " [\n", - " \"Phase_J/psi(1S)_to_f(0)(1500)_0+gamma_1;f(0)(1500)_to_pi0_0+pi0_0;\",\n", - " \"Position_f(0)(1710)\",\n", - " \"Width_f(0)(1710)\",\n", - " ],\n", - ");" + "fit_traceback.plot(\"function_call\", sorted(initial_parameters));" ] } ], diff --git a/reqs/3.6/requirements-dev.txt b/reqs/3.6/requirements-dev.txt index 5130e098..9abda200 100644 --- a/reqs/3.6/requirements-dev.txt +++ b/reqs/3.6/requirements-dev.txt @@ -39,7 +39,7 @@ dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -73,6 +73,8 @@ ipython-genutils==0.2.0 ipython==7.16.1 ipywidgets==7.6.3 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.60 jedi==0.18.0 jinja2==2.11.3 json5==0.9.5 @@ -115,6 +117,7 @@ nbstripout==0.3.9 nest-asyncio==1.5.1 nodeenv==1.5.0 notebook==6.2.0 +numba==0.52.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 diff --git a/reqs/3.6/requirements-doc.txt b/reqs/3.6/requirements-doc.txt index 397c8ff9..6f4dedfe 100644 --- a/reqs/3.6/requirements-doc.txt +++ b/reqs/3.6/requirements-doc.txt @@ -28,7 +28,7 @@ defusedxml==0.7.0 dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 fuzzywuzzy==0.18.0 gast==0.3.3 @@ -50,6 +50,8 @@ ipykernel==5.5.0 ipython-genutils==0.2.0 ipython==7.16.1 ipywidgets==7.6.3 +jax==0.2.9 +jaxlib==0.1.60 jedi==0.18.0 jinja2==2.11.3 jsonschema==3.2.0 @@ -110,6 +112,7 @@ 
qtpy==1.9.0 requests-oauthlib==1.3.0 requests==2.25.1 rsa==4.7.2 +scipy==1.5.4 send2trash==1.5.0 six==1.15.0 smmap==3.0.5 diff --git a/reqs/3.6/requirements-sty.txt b/reqs/3.6/requirements-sty.txt index 989c408c..ca3559d2 100644 --- a/reqs/3.6/requirements-sty.txt +++ b/reqs/3.6/requirements-sty.txt @@ -26,7 +26,7 @@ distlib==0.3.1 dm-tree==0.1.5 docutils==0.16 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -51,6 +51,8 @@ importlib-resources==3.0.0 iniconfig==1.1.1 ipython-genutils==0.2.0 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.60 jsonschema==3.2.0 jupyter-core==4.7.1 keras-preprocessing==1.1.2 diff --git a/reqs/3.6/requirements-test.txt b/reqs/3.6/requirements-test.txt index cd166b51..c889850d 100644 --- a/reqs/3.6/requirements-test.txt +++ b/reqs/3.6/requirements-test.txt @@ -18,7 +18,7 @@ cycler==0.10.0 decorator==4.4.2 dm-tree==0.1.5 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 @@ -34,6 +34,8 @@ importlib-metadata==3.7.2 importlib-resources==3.0.0 iniconfig==1.1.1 jsonschema==3.2.0 +jax==0.2.9 +jaxlib==0.1.60 keras-preprocessing==1.1.2 kiwisolver==1.3.1 markdown==3.3.4 diff --git a/reqs/3.6/requirements.txt b/reqs/3.6/requirements.txt index b96e42fe..5f74a75a 100644 --- a/reqs/3.6/requirements.txt +++ b/reqs/3.6/requirements.txt @@ -14,7 +14,7 @@ chardet==4.0.0 cloudpickle==1.6.0 decorator==4.4.2 dm-tree==0.1.5 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 diff --git a/reqs/3.7/requirements-dev.txt b/reqs/3.7/requirements-dev.txt index d0bf6893..95a70f9a 100644 --- a/reqs/3.7/requirements-dev.txt +++ b/reqs/3.7/requirements-dev.txt @@ -37,7 +37,7 @@ dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -69,6 +69,8 @@ ipython-genutils==0.2.0 ipython==7.21.0 ipywidgets==7.6.3 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.61 jedi==0.18.0 jinja2==2.11.3 json5==0.9.5 @@ -90,6 +92,7 @@ kiwisolver==1.3.1 labels==20.1.0 lazy-object-proxy==1.5.2 livereload==2.6.3 +llvmlite==0.35.0 markdown-it-py==0.6.2 markdown==3.3.4 markupsafe==1.1.1 @@ -111,6 +114,7 @@ nbstripout==0.3.9 nest-asyncio==1.5.1 nodeenv==1.5.0 notebook==6.2.0 +numba==0.52.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 diff --git a/reqs/3.7/requirements-doc.txt b/reqs/3.7/requirements-doc.txt index 611a811b..625cacc5 100644 --- a/reqs/3.7/requirements-doc.txt +++ b/reqs/3.7/requirements-doc.txt @@ -28,7 +28,7 @@ defusedxml==0.7.0 dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 fuzzywuzzy==0.18.0 gast==0.3.3 @@ -49,6 +49,8 @@ ipykernel==5.5.0 ipython-genutils==0.2.0 ipython==7.21.0 ipywidgets==7.6.3 +jax==0.2.9 +jaxlib==0.1.61 jedi==0.18.0 jinja2==2.11.3 jsonschema==3.2.0 @@ -109,6 +111,7 @@ qtpy==1.9.0 requests-oauthlib==1.3.0 requests==2.25.1 rsa==4.7.2 +scipy==1.6.1 send2trash==1.5.0 six==1.15.0 smmap==3.0.5 diff --git a/reqs/3.7/requirements-sty.txt b/reqs/3.7/requirements-sty.txt index cd790c63..c3f77a42 100644 --- a/reqs/3.7/requirements-sty.txt +++ b/reqs/3.7/requirements-sty.txt @@ -25,7 +25,7 @@ distlib==0.3.1 dm-tree==0.1.5 docutils==0.16 execnet==1.8.0 -expertsystem==0.6.9 
+git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -49,11 +49,14 @@ importlib-metadata==3.7.2 iniconfig==1.1.1 ipython-genutils==0.2.0 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.61 jsonschema==3.2.0 jupyter-core==4.7.1 keras-preprocessing==1.1.2 kiwisolver==1.3.1 lazy-object-proxy==1.5.2 +llvmlite==0.35.0 markdown==3.3.4 matplotlib==3.3.4 mccabe==0.6.1 @@ -63,6 +66,7 @@ mypy==0.812 nbformat==5.1.2 nbstripout==0.3.9 nodeenv==1.5.0 +numba==0.52.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 diff --git a/reqs/3.7/requirements-test.txt b/reqs/3.7/requirements-test.txt index 7f4c661b..b24fdd7b 100644 --- a/reqs/3.7/requirements-test.txt +++ b/reqs/3.7/requirements-test.txt @@ -18,7 +18,7 @@ cycler==0.10.0 decorator==4.4.2 dm-tree==0.1.5 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 @@ -33,6 +33,8 @@ iminuit==2.4.0 importlib-metadata==3.7.2 iniconfig==1.1.1 jsonschema==3.2.0 +jax==0.2.9 +jaxlib==0.1.61 keras-preprocessing==1.1.2 kiwisolver==1.3.1 markdown==3.3.4 diff --git a/reqs/3.7/requirements.txt b/reqs/3.7/requirements.txt index d490eebc..0e8a2c27 100644 --- a/reqs/3.7/requirements.txt +++ b/reqs/3.7/requirements.txt @@ -14,7 +14,7 @@ chardet==4.0.0 cloudpickle==1.6.0 decorator==4.4.2 dm-tree==0.1.5 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 diff --git a/reqs/3.8/requirements-dev.txt b/reqs/3.8/requirements-dev.txt index 3daeb20b..b27370ba 100644 --- a/reqs/3.8/requirements-dev.txt +++ b/reqs/3.8/requirements-dev.txt @@ -37,7 +37,7 @@ dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -69,6 +69,8 @@ ipython-genutils==0.2.0 ipython==7.21.0 ipywidgets==7.6.3 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.61 jedi==0.18.0 jinja2==2.11.3 json5==0.9.5 @@ -90,6 +92,7 @@ kiwisolver==1.3.1 labels==20.1.0 lazy-object-proxy==1.5.2 livereload==2.6.3 +llvmlite==0.35.0 markdown-it-py==0.6.2 markdown==3.3.4 markupsafe==1.1.1 @@ -111,6 +114,7 @@ nbstripout==0.3.9 nest-asyncio==1.5.1 nodeenv==1.5.0 notebook==6.2.0 +numba==0.52.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 diff --git a/reqs/3.8/requirements-doc.txt b/reqs/3.8/requirements-doc.txt index fdb24986..ec6e2f8e 100644 --- a/reqs/3.8/requirements-doc.txt +++ b/reqs/3.8/requirements-doc.txt @@ -28,7 +28,7 @@ defusedxml==0.7.0 dm-tree==0.1.5 docutils==0.16 entrypoints==0.3 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 fuzzywuzzy==0.18.0 gast==0.3.3 @@ -49,6 +49,8 @@ ipykernel==5.5.0 ipython-genutils==0.2.0 ipython==7.21.0 ipywidgets==7.6.3 +jax==0.2.9 +jaxlib==0.1.61 jedi==0.18.0 jinja2==2.11.3 jsonschema==3.2.0 @@ -109,6 +111,7 @@ qtpy==1.9.0 requests-oauthlib==1.3.0 requests==2.25.1 rsa==4.7.2 +scipy==1.6.1 send2trash==1.5.0 six==1.15.0 smmap==3.0.5 diff --git a/reqs/3.8/requirements-sty.txt b/reqs/3.8/requirements-sty.txt index 0396c47c..fd4d308b 100644 --- a/reqs/3.8/requirements-sty.txt +++ b/reqs/3.8/requirements-sty.txt @@ -25,7 +25,7 @@ distlib==0.3.1 dm-tree==0.1.5 docutils==0.16 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy filelock==3.0.12 flake8-blind-except==0.2.0 flake8-bugbear==21.3.1 @@ -48,11 +48,14 @@ iminuit==2.4.0 iniconfig==1.1.1 
ipython-genutils==0.2.0 isort==5.7.0 +jax==0.2.9 +jaxlib==0.1.61 jsonschema==3.2.0 jupyter-core==4.7.1 keras-preprocessing==1.1.2 kiwisolver==1.3.1 lazy-object-proxy==1.5.2 +llvmlite==0.35.0 markdown==3.3.4 matplotlib==3.3.4 mccabe==0.6.1 @@ -62,6 +65,7 @@ mypy==0.812 nbformat==5.1.2 nbstripout==0.3.9 nodeenv==1.5.0 +numba==0.52.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 diff --git a/reqs/3.8/requirements-test.txt b/reqs/3.8/requirements-test.txt index 887cae9c..e55086b2 100644 --- a/reqs/3.8/requirements-test.txt +++ b/reqs/3.8/requirements-test.txt @@ -18,7 +18,7 @@ cycler==0.10.0 decorator==4.4.2 dm-tree==0.1.5 execnet==1.8.0 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 @@ -32,6 +32,8 @@ idna==2.10 iminuit==2.4.0 iniconfig==1.1.1 jsonschema==3.2.0 +jax==0.2.9 +jaxlib==0.1.61 keras-preprocessing==1.1.2 kiwisolver==1.3.1 markdown==3.3.4 diff --git a/reqs/3.8/requirements.txt b/reqs/3.8/requirements.txt index 25b89040..0b17716d 100644 --- a/reqs/3.8/requirements.txt +++ b/reqs/3.8/requirements.txt @@ -14,7 +14,7 @@ chardet==4.0.0 cloudpickle==1.6.0 decorator==4.4.2 dm-tree==0.1.5 -expertsystem==0.6.9 +git+https://github.com/ComPWA/expertsystem@sympy flatbuffers==1.12 gast==0.3.3 google-auth-oauthlib==0.4.3 diff --git a/reqs/requirements-doc.in b/reqs/requirements-doc.in index 5bb7a781..bc3b051f 100644 --- a/reqs/requirements-doc.in +++ b/reqs/requirements-doc.in @@ -14,3 +14,7 @@ sphinx-panels sphinx-thebe sphinx-togglebutton sphobjinv + +# Additional backends +jax +jaxlib diff --git a/reqs/requirements-sty.in b/reqs/requirements-sty.in index 019b539e..e03078a7 100644 --- a/reqs/requirements-sty.in +++ b/reqs/requirements-sty.in @@ -18,3 +18,6 @@ mypy >= 0.570 # attrs support pep8-naming pydocstyle pylint + +# Additional backends +numba diff --git a/reqs/requirements-test.in b/reqs/requirements-test.in index 97f0b871..69f96b2e 100644 --- a/reqs/requirements-test.in +++ b/reqs/requirements-test.in @@ -8,3 +8,7 @@ pytest-cov pytest-profiling pytest-xdist scipy + +# Additional backends +jax +jaxlib diff --git a/setup.cfg b/setup.cfg index f37ee108..8355486d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ setup_requires = setuptools_scm install_requires = amplitf - expertsystem == 0.6.9 + expertsystem @ git+https://github.com/ComPWA/expertsystem@sympy iminuit >= 2.0 numpy pandas @@ -79,8 +79,12 @@ disallow_untyped_defs = False ignore_errors = True ; https://github.com/ComPWA/tensorwaves/issues/125 +[mypy-jax.*] +ignore_missing_imports = True [mypy-matplotlib.*] ignore_missing_imports = True +[mypy-numba.*] +ignore_missing_imports = True [mypy-numpy.*] ignore_missing_imports = True [mypy-pandas.*] diff --git a/src/tensorwaves/data/generate.py b/src/tensorwaves/data/generate.py index 736b6c91..77611593 100644 --- a/src/tensorwaves/data/generate.py +++ b/src/tensorwaves/data/generate.py @@ -1,9 +1,11 @@ """Tools to facilitate data sample generation.""" import logging -from typing import Callable, Optional +from typing import Callable, Optional, Tuple import numpy as np +from expertsystem.amplitude.data import MomentumPool +from expertsystem.amplitude.kinematics import HelicityKinematics, ReactionInfo from tqdm import tqdm from tensorwaves.data.tf_phasespace import ( TFPhaseSpaceGenerator, ) from tensorwaves.interfaces import ( Function, - Kinematics, PhaseSpaceGenerator, UniformRealNumberGenerator, ) -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, - 
ParticleReactionKinematicsInfo, -) def _generate_data_bunch( @@ -27,20 +24,24 @@ def _generate_data_bunch( phsp_generator: PhaseSpaceGenerator, random_generator: UniformRealNumberGenerator, intensity: Function, - kinematics: Kinematics, -) -> np.ndarray: + kinematics: HelicityKinematics, +) -> Tuple[MomentumPool, float]: phsp_sample, weights = phsp_generator.generate( bunch_size, random_generator ) dataset = kinematics.convert(phsp_sample) intensities = intensity(dataset) - maxvalue = np.max(intensities) + maxvalue: float = np.max(intensities) uniform_randoms = random_generator(bunch_size, max_value=maxvalue) - phsp_sample = phsp_sample.transpose(1, 0, 2) - - return (phsp_sample[weights * intensities > uniform_randoms], maxvalue) + hit_and_miss_sample = MomentumPool( + { + i: four_momenta[weights * intensities > uniform_randoms] + for i, four_momenta in phsp_sample.items() + } + ) + return hit_and_miss_sample, maxvalue def generate_data( @@ -48,20 +49,18 @@ def generate_data( kinematics: HelicityKinematics, intensity: Function, phsp_generator: Callable[ - [ParticleReactionKinematicsInfo], PhaseSpaceGenerator + [ReactionInfo], PhaseSpaceGenerator ] = TFPhaseSpaceGenerator, random_generator: Optional[UniformRealNumberGenerator] = None, bunch_size: int = 50000, -) -> np.ndarray: +) -> MomentumPool: """Facade function for creating data samples based on an intensities. Args: size: Sample size to generate. - kinematics: A kinematics instance. Note that this instance must have a - property :attr:`~.HelicityKinematics.reaction_kinematics_info` of - the type `.ParticleReactionKinematicsInfo`, otherwise the phase - space generator instance cannot be constructed. - intensity: The intensity which will be sampled. + kinematics: A `~expertsystem.amplitude.kinematics.HelicityKinematics` + instance. + intensity: The intensity `.Function` that will be sampled. phsp_generator: Class of a phase space generator. random_generator: A uniform real random number generator. Defaults to `.TFUniformRealNumberGenerator` with **indeterministic** behavior. @@ -69,7 +68,7 @@ def generate_data( generated from many smaller samples, aka bunches. """ - phsp_gen_instance = phsp_generator(kinematics.reaction_kinematics_info) + phsp_gen_instance = phsp_generator(kinematics.reaction_info) if random_generator is None: random_generator = TFUniformRealNumberGenerator() @@ -78,9 +77,9 @@ def generate_data( desc="Generating intensity-based sample", disable=logging.getLogger().level > logging.WARNING, ) - events = np.array([]) + momentum_pool = MomentumPool({}) current_max = 0.0 - while np.size(events, 0) < size: + while momentum_pool.n_events < size: bunch, maxvalue = _generate_data_bunch( bunch_size, phsp_gen_instance, @@ -88,45 +87,46 @@ def generate_data( intensity, kinematics, ) - if maxvalue > current_max: current_max = 1.05 * maxvalue - if np.size(events, 0) > 0: + if momentum_pool.n_events > 0: logging.info( "processed bunch maximum of %s is over current" " maximum %s. 
Restarting generation!", maxvalue, current_max, ) - events = np.array([]) + momentum_pool = MomentumPool({}) progress_bar.update() continue - if np.size(events, 0) > 0: - events = np.vstack((events, bunch)) + if np.size(momentum_pool, 0) > 0: + momentum_pool.append(bunch) else: - events = bunch + momentum_pool = bunch progress_bar.update() progress_bar.close() - return events[0:size].transpose(1, 0, 2) + return momentum_pool def generate_phsp( size: int, kinematics: HelicityKinematics, phsp_generator: Callable[ - [ParticleReactionKinematicsInfo], PhaseSpaceGenerator + [ReactionInfo], PhaseSpaceGenerator ] = TFPhaseSpaceGenerator, random_generator: Optional[UniformRealNumberGenerator] = None, bunch_size: int = 50000, -) -> np.ndarray: +) -> MomentumPool: """Facade function for creating (unweighted) phase space samples. Args: size: Sample size to generate. kinematics: A kinematics instance. Note that this instance must have a - property :attr:`~.HelicityKinematics.reaction_kinematics_info` of - the type `.ParticleReactionKinematicsInfo`, otherwise the phase - space generator instance cannot be constructed. + property + `~expertsystem.amplitude.kinematics.HelicityKinematics.reaction_info` + of the type + `expertsystem.amplitude.kinematics.ReactionInfo`, + otherwise the phase space generator instance cannot be constructed. phsp_generator: Class of a phase space generator. random_generator: A uniform real random number generator. Defaults to `.TFUniformRealNumberGenerator` with **indeterministic** behavior. @@ -134,7 +134,7 @@ def generate_phsp( generated from many smaller samples, aka bunches. """ - phsp_gen_instance = phsp_generator(kinematics.reaction_kinematics_info) + phsp_gen_instance = phsp_generator(kinematics.reaction_info) if random_generator is None: random_generator = TFUniformRealNumberGenerator() @@ -143,21 +143,23 @@ def generate_phsp( desc="Generating phase space sample", disable=logging.getLogger().level > logging.WARNING, ) - events = np.array([]) - while np.size(events, 0) < size: - four_momenta, weights = phsp_gen_instance.generate( + momentum_pool = MomentumPool({}) + while momentum_pool.n_events < size: + phsp_sample, weights = phsp_gen_instance.generate( bunch_size, random_generator ) - four_momenta = four_momenta.transpose(1, 0, 2) - hit_and_miss_randoms = random_generator(bunch_size) + bunch = MomentumPool( + { + i: four_momenta[weights > hit_and_miss_randoms] + for i, four_momenta in phsp_sample.items() + } + ) - bunch = four_momenta[weights > hit_and_miss_randoms] - - if np.size(events, 0) > 0: - events = np.vstack((events, bunch)) + if momentum_pool.n_events > 0: + momentum_pool.append(bunch) else: - events = bunch + momentum_pool = bunch progress_bar.update() progress_bar.close() - return events[0:size].transpose(1, 0, 2) + return momentum_pool.select_events(slice(0, size)) diff --git a/src/tensorwaves/data/tf_phasespace.py b/src/tensorwaves/data/tf_phasespace.py index d0d89052..6e4343dc 100644 --- a/src/tensorwaves/data/tf_phasespace.py +++ b/src/tensorwaves/data/tf_phasespace.py @@ -1,35 +1,36 @@ """Phase space generation using tensorflow.""" -from typing import Optional +from typing import Optional, Tuple -import numpy as np +import expertsystem.amplitude.kinematics as es import phasespace import tensorflow as tf +from expertsystem.amplitude.data import MomentumPool, ScalarSequence from phasespace.random import get_rng from tensorwaves.interfaces import ( PhaseSpaceGenerator, UniformRealNumberGenerator, ) -from tensorwaves.physics.helicity_formalism.kinematics 
import ( - ParticleReactionKinematicsInfo, -) class TFPhaseSpaceGenerator(PhaseSpaceGenerator): """Implements a phase space generator using tensorflow.""" - def __init__( - self, reaction_kinematics_info: ParticleReactionKinematicsInfo - ) -> None: + def __init__(self, reaction_info: es.ReactionInfo) -> None: + initial_states = reaction_info.initial_state.values() + if len(initial_states) != 1: + raise ValueError("Not a 1-to-n body decay") + initial_state = next(iter(initial_states)) self.phsp_gen = phasespace.nbody_decay( - reaction_kinematics_info.total_invariant_mass, - reaction_kinematics_info.final_state_masses, + mass_top=initial_state.mass, + masses=[p.mass for p in reaction_info.final_state.values()], + names=list(map(str, reaction_info.final_state)), ) def generate( self, size: int, rng: UniformRealNumberGenerator - ) -> np.ndarray: + ) -> Tuple[MomentumPool, ScalarSequence]: if not isinstance(rng, TFUniformRealNumberGenerator): raise TypeError( f"{TFPhaseSpaceGenerator.__name__} requires a " @@ -39,10 +40,13 @@ def generate( weights, particles = self.phsp_gen.generate( n_events=size, seed=rng.generator ) - particles = np.array( - tuple(particles[x].numpy() for x in particles.keys()) + momentum_pool = MomentumPool( + { + int(label): momenta.numpy()[:, [3, 0, 1, 2]] + for label, momenta in particles.items() + } ) - return particles, weights.numpy() + return momentum_pool, ScalarSequence(weights.numpy()) class TFUniformRealNumberGenerator(UniformRealNumberGenerator): @@ -54,13 +58,15 @@ def __init__(self, seed: Optional[float] = None): def __call__( self, size: int, min_value: float = 0.0, max_value: float = 1.0 - ) -> np.ndarray: - return self.generator.uniform( - shape=[size], - minval=min_value, - maxval=max_value, - dtype=self.dtype, - ).numpy() + ) -> ScalarSequence: + return ScalarSequence( + self.generator.uniform( + shape=[size], + minval=min_value, + maxval=max_value, + dtype=self.dtype, + ).numpy() + ) @property def seed(self) -> Optional[float]: diff --git a/src/tensorwaves/estimator.py b/src/tensorwaves/estimator.py index 42b80ce0..6eaf6ef2 100644 --- a/src/tensorwaves/estimator.py +++ b/src/tensorwaves/estimator.py @@ -2,45 +2,40 @@ All estimators have to implement the `~.interfaces.Estimator` interface. 
""" -from typing import Dict +from typing import Callable, Dict, Mapping, Union -import numpy as np -import tensorflow as tf +from expertsystem.amplitude.data import DataSet -from tensorwaves.interfaces import Estimator, Function +from tensorwaves.interfaces import Estimator, Model +from tensorwaves.physics.amplitude import get_backend_modules -class _NormalizedFunction(Function): - def __init__( - self, - unnormalized_function: Function, - norm_dataset: dict, - norm_volume: float = 1.0, - ) -> None: - self._model = unnormalized_function - # it is crucial to convert the input data to tensors - # otherwise the memory footprint can increase dramatically - self._norm_dataset = { - x: tf.constant(y) for x, y in norm_dataset.items() - } - self._norm_volume = norm_volume - - def __call__(self, dataset: dict) -> tf.Tensor: - normalization = tf.multiply( - self._norm_volume, - tf.reduce_mean(self._model(self._norm_dataset)), - ) - return tf.divide(self._model(dataset), normalization) +def gradient_creator( + function: Callable[[Mapping[str, Union[float, complex]]], float], + backend: Union[str, tuple, dict], +) -> Callable[ + [Mapping[str, Union[float, complex]]], Dict[str, Union[float, complex]] +]: + # pylint: disable=import-outside-toplevel + def not_implemented( + parameters: Mapping[str, Union[float, complex]] + ) -> Dict[str, Union[float, complex]]: + raise NotImplementedError("Gradient not implemented.") + + if isinstance(backend, str) and backend == "jax": + import jax + from jax.config import config - @property - def parameters(self) -> Dict[str, tf.Variable]: - return self._model.parameters + config.update("jax_enable_x64", True) - def update_parameters(self, new_parameters: dict) -> None: - self._model.update_parameters(new_parameters) + return jax.grad(function) + return not_implemented -class UnbinnedNLL(Estimator): + +class SympyUnbinnedNLL( # pylint: disable=too-many-instance-attributes + Estimator +): """Unbinned negative log likelihood estimator. 
Args: @@ -54,25 +49,48 @@ class UnbinnedNLL(Estimator): """ - def __init__(self, model: Function, dataset: dict, phsp_set: dict) -> None: - if phsp_set and len(phsp_set) > 0: - self.__model: Function = _NormalizedFunction(model, phsp_set) - else: - self.__model = model - self.__dataset = dataset - - def __call__(self) -> float: - props = self.__model(self.__dataset) - logs = tf.math.log(props) - log_lh = tf.reduce_sum(logs) - return -log_lh.numpy() - - def gradient(self) -> np.ndarray: - raise NotImplementedError("Gradient not implemented.") + def __init__( + self, + model: Model, + dataset: DataSet, + phsp_dataset: DataSet, + phsp_volume: float = 1.0, + backend: Union[str, tuple, dict] = "numpy", + ) -> None: + self.__function = model.lambdify(backend) + self.__gradient = gradient_creator(self.__call__, backend) + backend_modules = get_backend_modules(backend) + + def find_function_in_backend(name: str) -> Callable: + if isinstance(backend_modules, dict) and name in backend_modules: + return backend_modules[name] + if isinstance(backend_modules, (tuple, list)): + for module in backend_modules: + if name in module.__dict__: + return module.__dict__[name] + raise ValueError(f"Could not find function {name} in backend") + + self.__mean_function = find_function_in_backend("mean") + self.__sum_function = find_function_in_backend("sum") + self.__log_function = find_function_in_backend("log") - @property - def parameters(self) -> dict: - return self.__model.parameters + self.__dataset = dataset + self.__phsp_dataset = phsp_dataset + self.__phsp_volume = phsp_volume + + def __call__( + self, parameters: Mapping[str, Union[float, complex]] + ) -> float: + self.__function.update_parameters(parameters) + bare_intensities = self.__function(self.__dataset) + normalization_factor = 1.0 / ( + self.__phsp_volume + * self.__mean_function(self.__function(self.__phsp_dataset)) + ) + likelihoods = normalization_factor * bare_intensities + return -self.__sum_function(self.__log_function(likelihoods)) - def update_parameters(self, new_parameters: dict) -> None: - self.__model.update_parameters(new_parameters) + def gradient( + self, parameters: Mapping[str, Union[float, complex]] + ) -> Dict[str, Union[float, complex]]: + return self.__gradient(parameters) diff --git a/src/tensorwaves/interfaces.py b/src/tensorwaves/interfaces.py index 77fd8f99..47be9f86 100644 --- a/src/tensorwaves/interfaces.py +++ b/src/tensorwaves/interfaces.py @@ -1,7 +1,9 @@ """Defines top-level interfaces of tensorwaves.""" from abc import ABC, abstractmethod -from typing import Optional, Tuple, Union +from typing import Any, Dict, FrozenSet, Mapping, Optional, Tuple, Union + +from expertsystem.amplitude.data import DataSet, MomentumPool, ScalarSequence class Function(ABC): @@ -10,64 +12,91 @@ class Function(ABC): The parameters of the model are separated from the domain variables. This follows the mathematical definition, in which a function defines its domain and parameters. However specific points in the domain are not relevant. - Hence while the domain variables are the argument of the evaluation - (see :func:`~Function.__call__`), the parameters are controlled via a - getter and setter (see :func:`~Function.parameters`). The reason for this - separation is to facilitate the events when parameters have changed. + Hence while the domain variables are the argument of the evaluation (see + :func:`~Function.__call__`), the parameters are controlled via a getter and + setter (see :func:`~Function.parameters`). 
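Spelled out, `SympyUnbinnedNLL.__call__` above computes an unbinned negative log-likelihood in which the normalization integral is estimated as the mean intensity over a phase-space sample. A plain numpy sketch of that core computation (the helper name `unbinned_nll` is illustrative, not tensorwaves API):

```python
import numpy as np


def unbinned_nll(data_intensities, phsp_intensities, phsp_volume=1.0):
    # Monte Carlo estimate of the intensity integral over phase space
    normalization = 1.0 / (phsp_volume * np.mean(phsp_intensities))
    likelihoods = normalization * data_intensities
    return -np.sum(np.log(likelihoods))


print(unbinned_nll(np.array([2.0, 3.0]), np.ones(3)))  # -(ln 2 + ln 3) ≈ -1.79
```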
The reason for this separation + is to facilitate the events when parameters have changed. """ @abstractmethod - def __call__(self, dataset: dict) -> list: + def __call__(self, dataset: DataSet) -> ScalarSequence: """Evaluate the function. Args: dataset: a `dict` with domain variable names as keys. Return: - `list` or `numpy.array` of values. - + Result of the function evaluation. Type depends on the input type. """ @property @abstractmethod - def parameters(self) -> dict: + def parameters(self) -> Dict[str, Union[float, complex]]: """Get `dict` of parameters.""" @abstractmethod - def update_parameters(self, new_parameters: dict) -> None: + def update_parameters( + self, new_parameters: Mapping[str, Union[float, complex]] + ) -> None: """Update the collection of parameters.""" -class Estimator(ABC): - """Estimator for discrepancy model and data.""" +class Model(ABC): + """Interface of a model which can be lambdified into a callable.""" @abstractmethod - def __call__(self) -> float: - """Evaluate discrepancy.""" + def lambdify(self, backend: Union[str, tuple, dict]) -> Function: + """Lambdify the model into a Function. + + Args: + backend: Choice of backend for fast evaluations. Argument is passed to + the `~.lambdify` function. + + The argument of the Function resembles a dataset in the form of a + mapping of a variable name to value type. The return value of the + Function is of a value type. + """ @abstractmethod - def gradient(self) -> list: - """Compute the gradient of the data set.""" + def performance_optimize(self, fix_inputs: DataSet) -> "Model": + """Create a performance optimized model, based on fixed inputs.""" @property @abstractmethod - def parameters(self) -> dict: - """Get `dict` of parameters.""" + def parameters(self) -> Dict[str, Union[float, complex]]: + """Get mapping of parameters to suggested initial values.""" + @property @abstractmethod - def update_parameters(self, new_parameters: dict) -> None: - """Update the collection of parameters.""" + def variables(self) -> FrozenSet[str]: + """Expected input variable names.""" + + +class Estimator(ABC): + """Estimator for discrepancy model and data.""" + + @abstractmethod + def __call__( + self, parameters: Mapping[str, Union[float, complex]] + ) -> float: + """Evaluate discrepancy.""" + + @abstractmethod + def gradient( + self, parameters: Mapping[str, Union[float, complex]] + ) -> Dict[str, Union[float, complex]]: + """Calculate gradient for given parameter mapping.""" class Kinematics(ABC): """Abstract interface for computation of kinematic variables.""" @abstractmethod - def convert(self, events: dict) -> dict: + def convert(self, events: MomentumPool) -> DataSet: """Convert a set of momentum tuples (events) to kinematic variables.""" @abstractmethod - def is_within_phase_space(self, events: dict) -> Tuple[bool]: + def is_within_phase_space(self, events: MomentumPool) -> Tuple[bool]: """Check which events lie within phase space.""" @property @@ -80,7 +109,11 @@ class Optimizer(ABC): """Optimize a fit model to a data set.""" @abstractmethod - def optimize(self, estimator: Estimator, initial_parameters: dict) -> dict: + def optimize( + self, + estimator: Estimator, + initial_parameters: Mapping[str, Union[float, complex]], + ) -> Dict[str, Any]: """Execute optimization.""" @@ -90,7 +123,7 @@ class UniformRealNumberGenerator(ABC): @abstractmethod def __call__( self, size: int, min_value: float = 0.0, max_value: float = 1.0 - ) -> Union[float, list]: + ) -> ScalarSequence: """Generate random floats in the range from 
[min_value,max_value).""" @property # type: ignore @@ -108,5 +141,11 @@ class PhaseSpaceGenerator(ABC): """Abstract class for generating phase space samples.""" @abstractmethod - def generate(self, size: int, rng: UniformRealNumberGenerator) -> dict: - """Generate phase space sample.""" + def generate( + self, size: int, rng: UniformRealNumberGenerator + ) -> Tuple[MomentumPool, ScalarSequence]: + """Generate phase space sample. + + Returns a `tuple` of a mapping of final-state IDs to `numpy.array` s + of four-momenta, and a `ScalarSequence` of event weights. + """ diff --git a/src/tensorwaves/optimizer/callbacks.py b/src/tensorwaves/optimizer/callbacks.py index f1159c1c..661dde41 100644 --- a/src/tensorwaves/optimizer/callbacks.py +++ b/src/tensorwaves/optimizer/callbacks.py @@ -177,7 +177,7 @@ def on_function_call_end(self) -> None: @staticmethod def load_latest_parameters(filename: str) -> dict: with open(filename) as stream: - fit_stats = yaml.load(stream, Loader=yaml.SafeLoader) + fit_stats = yaml.load(stream, Loader=yaml.Loader) return fit_stats["parameters"] diff --git a/src/tensorwaves/optimizer/minuit.py b/src/tensorwaves/optimizer/minuit.py index 4d117e59..37bec112 100644 --- a/src/tensorwaves/optimizer/minuit.py +++ b/src/tensorwaves/optimizer/minuit.py @@ -2,10 +2,10 @@ """Minuit2 adapter to the `iminuit.Minuit` package.""" +import logging import time -from copy import deepcopy from datetime import datetime -from typing import Dict, Optional +from typing import Any, Dict, Iterable, Mapping, Optional, Union from iminuit import Minuit from tqdm import tqdm @@ -15,31 +15,93 @@ from .callbacks import Callback, CallbackList +class ParameterFlattener: + def __init__( + self, parameters: Mapping[str, Union[float, complex]] + ) -> None: + self.__real_imag_to_complex_name = {} + self.__complex_to_real_imag_name = {} + for name, val in parameters.items(): + if isinstance(val, complex): + real_name = f"real_{name}" + imag_name = f"imag_{name}" + self.__real_imag_to_complex_name[real_name] = name + self.__real_imag_to_complex_name[imag_name] = name + self.__complex_to_real_imag_name[name] = (real_name, imag_name) + + def unflatten( + self, flattened_parameters: Dict[str, float] + ) -> Dict[str, Union[float, complex]]: + parameters: Dict[str, Union[float, complex]] = { + k: v + for k, v in flattened_parameters.items() + if k not in self.__real_imag_to_complex_name + } + for complex_name, ( + real_name, + imag_name, + ) in self.__complex_to_real_imag_name.items(): + parameters[complex_name] = complex( + flattened_parameters[real_name], + flattened_parameters[imag_name], + ) + return parameters + + def flatten( + self, parameters: Mapping[str, Union[float, complex]] + ) -> Dict[str, float]: + flattened_parameters = {} + for par_name, value in parameters.items(): + if par_name in self.__complex_to_real_imag_name: + (real_name, imag_name) = self.__complex_to_real_imag_name[ + par_name + ] + flattened_parameters[real_name] = parameters[par_name].real + flattened_parameters[imag_name] = parameters[par_name].imag + else: + flattened_parameters[par_name] = value # type: ignore + return flattened_parameters + + class Minuit2(Optimizer): """The Minuit2 adapter. Implements the `~.interfaces.Optimizer` interface.
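The round trip performed by the `ParameterFlattener` above is easiest to see on a small example. Sketch only; the parameter names are made up:

```python
from tensorwaves.optimizer.minuit import ParameterFlattener

parameters = {"strength": 0.5, "coefficient": 1.0 + 0.2j}
handler = ParameterFlattener(parameters)
flattened = handler.flatten(parameters)
# {'strength': 0.5, 'real_coefficient': 1.0, 'imag_coefficient': 0.2}
assert handler.unflatten(flattened) == parameters  # lossless round trip
```

This is what lets Minuit, which only understands real-valued parameters, vary the real and imaginary parts of a complex coefficient independently.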
""" - def __init__(self, callback: Optional[Callback] = None) -> None: + def __init__( + self, + callback: Optional[Callback] = None, + use_analytic_gradient: bool = False, + ) -> None: self.__callback: Callback = CallbackList([]) if callback is not None: self.__callback = callback - - def optimize( - self, estimator: Estimator, initial_parameters: Dict[str, float] - ) -> dict: - parameters = deepcopy(initial_parameters) - progress_bar = tqdm() + self.__use_gradient = use_analytic_gradient + + def optimize( # pylint: disable=too-many-locals + self, + estimator: Estimator, + initial_parameters: Mapping[str, Union[complex, float]], + ) -> Dict[str, Any]: + parameter_handler = ParameterFlattener(initial_parameters) + flattened_parameters = parameter_handler.flatten(initial_parameters) + + progress_bar = tqdm( + disable=logging.getLogger().level > logging.WARNING + ) n_function_calls = 0 + def update_parameters(pars: list) -> None: + for i, k in enumerate(flattened_parameters): + flattened_parameters[k] = pars[i] + def wrapped_function(pars: list) -> float: nonlocal n_function_calls n_function_calls += 1 - for i, k in enumerate(parameters.keys()): - parameters[k] = pars[i] - estimator.update_parameters(parameters) - estimator_value = estimator() + update_parameters(pars) + parameters = parameter_handler.unflatten(flattened_parameters) + estimator_value = estimator(parameters) progress_bar.set_postfix({"estimator": estimator_value}) progress_bar.update() logs = { @@ -48,22 +110,29 @@ def wrapped_function(pars: list) -> float: "type": self.__class__.__name__, "value": float(estimator_value), }, - "parameters": { - name: float(value) for name, value in parameters.items() - }, + "parameters": parameters, } self.__callback.on_iteration_end(n_function_calls, logs) return estimator_value + def wrapped_gradient(pars: list) -> Iterable[float]: + update_parameters(pars) + parameters = parameter_handler.unflatten(flattened_parameters) + grad = estimator.gradient(parameters) + return parameter_handler.flatten(grad).values() + minuit = Minuit( wrapped_function, - tuple(parameters.values()), - name=tuple(parameters), + tuple(flattened_parameters.values()), + grad=wrapped_gradient if self.__use_gradient else None, + name=tuple(flattened_parameters), ) minuit.errors = tuple( - 0.1 * x if x != 0.0 else 0.1 for x in parameters.values() + 0.1 * x if x != 0.0 else 0.1 for x in flattened_parameters.values() ) - minuit.errordef = Minuit.LIKELIHOOD + minuit.errordef = ( + Minuit.LIKELIHOOD + ) # that error definition should be defined in the estimator start_time = time.time() minuit.migrad() @@ -73,12 +142,16 @@ def wrapped_function(pars: list) -> float: parameter_values = dict() parameter_errors = dict() - for i, name in enumerate(parameters.keys()): + for i, name in enumerate(flattened_parameters): par_state = minuit.params[i] parameter_values[name] = par_state.value parameter_errors[name] = par_state.error + parameter_values = parameter_handler.unflatten(parameter_values) + parameter_errors = parameter_handler.unflatten(parameter_errors) + return { + "minimum_valid": minuit.valid, "parameter_values": parameter_values, "parameter_errors": parameter_errors, "log_likelihood": minuit.fmin.fval, diff --git a/src/tensorwaves/physics/__init__.py b/src/tensorwaves/physics/__init__.py index 2d60eee8..c9a2bb23 100644 --- a/src/tensorwaves/physics/__init__.py +++ b/src/tensorwaves/physics/__init__.py @@ -1,7 +1,7 @@ """Evaluateable physics models for amplitude analysis.""" __all__ = [ - "helicity_formalism", + "amplitude", 
] -from . import helicity_formalism +from . import amplitude diff --git a/src/tensorwaves/physics/amplitude.py b/src/tensorwaves/physics/amplitude.py new file mode 100644 index 00000000..0dfe1668 --- /dev/null +++ b/src/tensorwaves/physics/amplitude.py @@ -0,0 +1,208 @@ +"""`.Function` Adapter for `sympy`-based models.""" + +from typing import Callable, Dict, FrozenSet, Mapping, Optional, Tuple, Union + +import sympy as sp +from expertsystem.amplitude.data import DataSet, ScalarSequence + +from tensorwaves.interfaces import Function, Model + + +def get_backend_modules( + backend: Union[str, tuple, dict], +) -> Union[str, tuple, dict]: + """Preprocess the backend argument passed to `~sympy.utilities.lambdify.lambdify`. + + Note in `~sympy.utilities.lambdify.lambdify` the backend is specified via + the :code:`modules` argument. + """ + # pylint: disable=import-outside-toplevel + if isinstance(backend, str): + if backend == "jax": + from jax import numpy as jnp + from jax import scipy as jsp + from jax.config import config + + config.update("jax_enable_x64", True) + + return (jnp, jsp.special) + if backend == "numpy": + import numpy as np + + return np.__dict__ + + return backend + + +class LambdifiedFunction(Function): + def __init__( + self, + function: Callable, + argument_keys: Tuple[str, ...], + parameters: Optional[Mapping[str, Union[complex, float]]] = None, + ) -> None: + """Wrapper around a callable produced by `~sympy.utilities.lambdify.lambdify`. + + Args: + function: A callable with **positional arguments** that has been + created by `~sympy.utilities.lambdify.lambdify`. + + argument_keys: Ordered `tuple` of keys for a + `~expertsystem.amplitude.data.DataSet` and/or parameter mapping + the values of which are to be mapped onto the positional + arguments of the function. + + parameters: Mapping of parameters to their initial values. + + For more info about the intention of this class, see `.Function`. + """ + if not callable(function): + raise TypeError("Function argument is not callable") + self.__callable = function + if len(argument_keys) != len(function.__code__.co_varnames): + raise ValueError( + f"Not all {len(function.__code__.co_varnames)} variables of the" + f" function {function} are covered by {argument_keys}", + ) + self.__argument_keys = argument_keys + self.__parameters: Dict[str, Union[complex, float]] = dict() + if parameters is not None: + self.update_parameters(parameters) + + def __call__(self, dataset: DataSet) -> ScalarSequence: + dataset_keys = set(dataset) + parameter_keys = set(self.parameters) + argument_keys = set(self.__argument_keys) + if not argument_keys <= (dataset_keys | parameter_keys): + missing_keys = argument_keys ^ ( + (argument_keys & dataset_keys) + | (argument_keys & parameter_keys) + ) + raise ValueError( + "Keys of dataset and parameter mapping do not cover all " + f"function arguments. Missing argument keys: {missing_keys}." 
+ ) + return self.__callable( + *[ + dataset[k] if k in dataset else self.__parameters[k] + for k in self.__argument_keys + ] + ) + + @property + def parameters(self) -> Dict[str, Union[float, complex]]: + return self.__parameters + + def update_parameters( + self, new_parameters: Mapping[str, Union[float, complex]] + ) -> None: + if not set(new_parameters) <= set(self.__argument_keys): + parameter_keys = set(new_parameters) + variable_keys = set(self.__argument_keys) + over_defined = parameter_keys ^ (variable_keys & parameter_keys) + raise ValueError( + f"Parameters {over_defined} do not exist in function arguments" + ) + self.__parameters.update(new_parameters) + + +class SympyModel(Model): + r"""Full definition of an arbitrary model based on `sympy`. + + Note that the input for particle physics amplitude models is based on + four momenta. However, for reasons of convenience, some models may define and + use a distinct set of kinematic variables (e.g. in the helicity formalism: + angles :math:`\theta` and :math:`\phi`). In this case, a `.Kinematics` + instance (adapter) is needed to convert four-momentum information into the + custom set of kinematic variables. + + Args: + expression: A sympy expression that contains the complete information + of the model based on some inputs. The inputs are defined via the + `~sympy.core.basic.Basic.free_symbols` attribute of the + `sympy.Expr`. + parameters: Defines which inputs of the model are parameters. The keys + represent the parameter set, while the values represent their default + values. Consequently, the variables of the model are defined as the + difference of the total input set and the parameter set. + """ + + def __init__( + self, + expression: sp.Expr, + parameters: Dict[sp.Symbol, Union[float, complex]], + ) -> None: + self.__expression: sp.Expr = expression.doit() + self.__parameters = parameters + self.__variables: FrozenSet[sp.Symbol] = frozenset( + self.__expression.free_symbols ^ set(self.__parameters) + ) + if not all(map(lambda p: isinstance(p, sp.Symbol), parameters)): + raise TypeError(f"Not all parameters are of type {sp.Symbol}") + + def lambdify(self, backend: Union[str, tuple, dict]) -> LambdifiedFunction: + """Lambdify the model using `~sympy.utilities.lambdify.lambdify`.""" + # pylint: disable=import-outside-toplevel + ordered_symbols = tuple(self.__variables) + tuple(self.__parameters) + + def jax_lambdify() -> Callable: + from jax import jit + + return jit( + sp.lambdify( + ordered_symbols, + self.__expression, + modules=backend_modules, + ) + ) + + def numba_lambdify() -> Callable: + # pylint: disable=import-error + from numba import jit + + return jit( + sp.lambdify( + ordered_symbols, + self.__expression, + modules="numpy", + ), + parallel=True, + ) + + backend_modules = get_backend_modules(backend) + full_function: Optional[Callable] = None + if isinstance(backend, str): + if backend == "jax": + full_function = jax_lambdify() + if backend == "numba": + full_function = numba_lambdify() + elif isinstance(backend, tuple): + if any("jax" in x.__name__ for x in backend): + full_function = jax_lambdify() + if any("numba" in x.__name__ for x in backend): + full_function = numba_lambdify() + if full_function is None: # default fallback + full_function = sp.lambdify( + ordered_symbols, + self.__expression, + modules=backend_modules, + ) + return LambdifiedFunction( + full_function, + argument_keys=tuple(s.name for s in ordered_symbols), + parameters=self.parameters, + ) + + def performance_optimize(self, fix_inputs:
DataSet) -> "Model": + raise NotImplementedError + + @property + def parameters(self) -> Dict[str, Union[float, complex]]: + return { + symbol.name: value for symbol, value in self.__parameters.items() + } + + @property + def variables(self) -> FrozenSet[str]: + """Expected input variable names.""" + return frozenset({symbol.name for symbol in self.__variables}) diff --git a/src/tensorwaves/physics/helicity_formalism/__init__.py b/src/tensorwaves/physics/helicity_formalism/__init__.py deleted file mode 100644 index 61387f13..00000000 --- a/src/tensorwaves/physics/helicity_formalism/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Implementation of the helicity formalism.""" - -__all__ = [ - "amplitude", - "kinematics", -] - -from . import amplitude, kinematics diff --git a/src/tensorwaves/physics/helicity_formalism/amplitude.py b/src/tensorwaves/physics/helicity_formalism/amplitude.py deleted file mode 100644 index 739306f4..00000000 --- a/src/tensorwaves/physics/helicity_formalism/amplitude.py +++ /dev/null @@ -1,648 +0,0 @@ -"""Amplitude module for the helicity formalism. - -Its responsibility is the construction of complicated helicity formalism -amplitude models using a recipe (see `IntensityBuilder`). These models are -encapsulated in an `IntensityTF` class, which can be evaluated as a regular -callable. -""" - -import logging -from typing import ( - Callable, - Dict, - List, - NamedTuple, - Optional, - Sequence, - Tuple, - Type, -) - -import amplitf.interface as atfi -import expertsystem.amplitude.model as es -import numpy as np -import tensorflow as tf -from amplitf.dynamics import ( - blatt_weisskopf_ff_squared, - relativistic_breit_wigner, -) -from amplitf.kinematics import two_body_momentum_squared, wigner_capital_d -from expertsystem.particle import Particle, ParticleCollection -from sympy.physics.quantum.cg import CG - -from tensorwaves.interfaces import Function - -from .kinematics import HelicityKinematics, SubSystem - - -class IntensityTF(Function): - """Implementation of the `~.Function` interface using tensorflow. - - Initialize the intensity based on a tensorflow model. - - Args: - tf_model: A callable with potential tensorflow code. - parameters: The collection of parameters of the model. - - """ - - def __init__(self, tf_model: Callable, parameters: Dict[str, tf.Variable]): - self._model = tf_model - self.__parameters = parameters - - def __call__(self, dataset: Dict[str, np.ndarray]) -> np.ndarray: - """Evaluate the Intensity. - - Args: - dataset: Contains all required kinematic variables. - - Returns: - List of intensity values. - - """ - # it is crucial to convert the input data to tensors - # otherwise the memory footprint can increase dramatically - newdataset = {x: tf.constant(y) for x, y in dataset.items()} - return self._model(newdataset).numpy() - - @property - def parameters(self) -> Dict[str, tf.Variable]: - return {x: y.value().numpy() for x, y in self.__parameters.items()} - - def update_parameters(self, new_parameters: dict) -> None: - for name, value in new_parameters.items(): - self.__parameters[name].assign(value) - - -class IntensityBuilder: - """Builds Intensities from helicity formalism recipe files. - - Args: - particles: Contains info of various particles. - kinematics: A helicity kinematics instance. Note that this kinematics - instance will be modified in the process. - - phsp_data: A phase space event collection, required if a normalization - of the Intensity is performed. 
- - """ - - def __init__( - self, - particles: ParticleCollection, - kinematics: HelicityKinematics, - phsp_data: Optional[np.ndarray] = None, - ): - self._particles = particles - self._dynamics: Optional[es.ParticleDynamics] = None - self._kinematics = kinematics - self._parameters: Dict[str, tf.Variable] = {} - if phsp_data is None: - phsp_data = np.array([]) - self._phsp_data = phsp_data - self._registered_element_builders: Dict[Type[es.Node], Callable] = { - es.NormalizedIntensity: _create_normalized_intensity, - es.StrengthIntensity: _create_strength_intensity, - es.IncoherentIntensity: _create_incoherent_intensity, - es.CoherentIntensity: _create_coherent_intensity, - es.CoefficientAmplitude: _create_coefficient_amplitude, - es.SequentialAmplitude: _create_sequential_amplitude, - es.HelicityDecay: _create_helicity_decay, - es.CanonicalDecay: _create_helicity_decay, - } - self._registered_dynamics_builders: Dict[ - Type[es.Dynamics], Callable - ] = { - es.NonDynamic: _NonDynamic, - es.RelativisticBreitWigner: _RelativisticBreitWigner, - } - - def create_intensity(self, model: es.AmplitudeModel) -> IntensityTF: - """Create an `IntensityTF` instance based on a recipe. - - Args: - model: Contains builder instructions. These recipe files can be - generated via the expert system (see - :doc:`expertsystem:usage/workflow`). - """ - self._dynamics = model.dynamics - self._initialize_parameters(model) - return IntensityTF( - self.create_element(model.intensity), self._parameters - ) - - def create_element(self, intensity_node: es.Node) -> Callable: - """Create a computation element from the recipe. - - The recipe can only contain names registered in the pool of known - element builders. - """ - element_class = type(intensity_node) - logging.debug("creating %s", element_class) - - if element_class not in self._registered_element_builders: - raise Exception(f"Unknown element {element_class.__name__}!") - - return self._registered_element_builders[element_class]( - self, intensity_node, kinematics=self._kinematics - ) - - def create_dynamics( - self, - decaying_state: Particle, - dynamics_properties: "DynamicsProperties", - ) -> Callable: - """Create a dynamics function callable.""" - if self._dynamics is None: - raise ValueError("Dynamics has not yet been set") - decay_dynamics = self._dynamics[decaying_state.name] - - kwargs = {} - form_factor = getattr(decay_dynamics, "form_factor", None) - if ( - form_factor is not None - and not dynamics_properties.orbit_angular_momentum.is_integer() - ): - raise ValueError( - "Model invalid! Using a non integer value for the orbital" - " angular momentum L. 
Seems like you are using the helicity" - " formalism, but should be using the canonical formalism" - ) - if isinstance(form_factor, es.BlattWeisskopf): - kwargs.update({"form_factor": "BlattWeisskopf"}) - meson_radius_val = form_factor.meson_radius.value - meson_radius_par = self.register_parameter( - f"MesonRadius_{decaying_state.name}", - meson_radius_val, - ) - dynamics_properties = dynamics_properties._replace( - meson_radius=meson_radius_par - ) - - dynamics_builder = self._get_dynamics_builder(decaying_state.name) - return dynamics_builder(dynamics_properties, **kwargs) - - def register_dynamics_builder( - self, - dynamics_name: Type[es.Dynamics], - builder: Callable[[str, "DynamicsProperties"], Callable], - ) -> None: - """Register custom dynamics function builders.""" - if dynamics_name in self._registered_dynamics_builders: - logging.warning( - "Overwriting previously defined builder for %s", dynamics_name - ) - self._registered_dynamics_builders[dynamics_name] = builder - - def _get_dynamics_builder( - self, decaying_state_name: str - ) -> Callable[..., Callable]: - if self._dynamics is None: - raise ValueError("Dynamics has not been set") - - dynamics = self._dynamics[decaying_state_name] - if type(dynamics) not in self._registered_dynamics_builders: - raise ValueError( - f"Dynamics ({dynamics.__class__.__name__}) unknown. " - f"Use one of the following: \n" - f"{list(self._registered_dynamics_builders.keys())}" - ) - return self._registered_dynamics_builders[type(dynamics)] - - def register_parameter(self, name: str, value: float) -> tf.Variable: - if name not in self._parameters: - self._parameters[name] = tf.Variable( - value, name=name, dtype=tf.float64 - ) - return self._parameters[name] - - def get_parameter(self, name: str) -> tf.Variable: - if name not in self._parameters: - raise Exception(f'Parameter "{name}" not registered') - - return self._parameters[name] - - def _initialize_parameters(self, model: es.AmplitudeModel) -> None: - parameters: List[es.FitParameter] = list(model.parameters.values()) - for par in parameters: - self._parameters[par.name] = tf.Variable( - par.value, name=par.name, dtype=tf.float64 - ) - - def get_normalization_data(self) -> Tuple[dict, float]: - """Return phase space dataset and its volume.""" - if self._phsp_data.size == 0: - raise Exception( - "No phase space sample given! This is required for the " - "normalization." 
- ) - return ( - self._kinematics.convert(self._phsp_data), - self._kinematics.phase_space_volume, - ) - - -class _NormalizedIntensity: - def __init__( - self, - unnormalized_intensity: Callable, - norm_dataset: dict, - norm_volume: float = 1.0, - ) -> None: - self._model = unnormalized_intensity - self._norm_dataset = norm_dataset - self._norm_volume = norm_volume - - @tf.function - def __call__(self, dataset: dict) -> tf.Tensor: - normalization = tf.multiply( - self._norm_volume, - tf.reduce_mean(self._model(self._norm_dataset)), - ) - return tf.divide(self._model(dataset), normalization) - - -def _create_normalized_intensity( - builder: IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.NormalizedIntensity): - raise TypeError( - f"Requires {es.NormalizedIntensity.__class__.__name__}" - ) - model = builder.create_element(node.intensity) - dataset, volume = builder.get_normalization_data() - # its important to convert the dataset to tf tensors (memory footprint) - dataset = {x: tf.constant(y) for x, y in dataset.items()} - return _NormalizedIntensity(model, dataset, atfi.const(volume)) - - -class _StrengthIntensity: - def __init__(self, intensity: Callable, strength: tf.Variable) -> None: - self._strength = strength - self._intensity = intensity - - def __call__(self, dataset: dict) -> tf.Tensor: - return self._strength * self._intensity(dataset) - - -def _create_strength_intensity( - builder: IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.StrengthIntensity): - raise TypeError - strength = builder.get_parameter(node.strength.name) - intensity = builder.create_element(node.intensity) - return _StrengthIntensity(intensity, strength) - - -class _IncoherentIntensity: - def __init__(self, intensities: List[Callable]) -> None: - self._intensities = intensities - - def __call__(self, dataset: dict) -> tf.Tensor: - return tf.math.accumulate_n([y(dataset) for y in self._intensities]) - - -def _create_incoherent_intensity( - builder: IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.IncoherentIntensity): - raise TypeError - intensities = [builder.create_element(x) for x in node.intensities] - return _IncoherentIntensity(intensities) - - -class _CoherentIntensity: - def __init__(self, amplitudes: List[Callable]) -> None: - self._amps = amplitudes - - def __call__(self, dataset: dict) -> tf.Tensor: - return tf.pow( - tf.cast( # pylint: disable=no-value-for-parameter,unexpected-keyword-arg - tf.abs(tf.add_n([amp(dataset) for amp in self._amps])), - dtype=tf.float64, - ), - tf.constant(2.0, dtype=tf.float64), - ) - - -def _create_coherent_intensity( - builder: IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.CoherentIntensity): - raise TypeError - amplitudes = [builder.create_element(x) for x in node.amplitudes] - return _CoherentIntensity(amplitudes) - - -class _CoefficientAmplitude: - def __init__( - self, - amplitude: Callable, - mag: tf.Variable, - phase: tf.Variable, - pre_factor: Optional[float] = None, - ): - self._mag = mag - self._phase = phase - self._amp = amplitude - if pre_factor is None: - self._pre_factor = tf.constant(1.0, dtype=tf.float64) - else: - self._pre_factor = tf.constant(pre_factor, dtype=tf.float64) - - def __call__(self, dataset: dict) -> tf.Tensor: - coefficient = atfi.polar(self._pre_factor * self._mag, self._phase) - return coefficient * self._amp(dataset) - - -def _create_coefficient_amplitude( - builder: 
IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.CoefficientAmplitude): - raise TypeError - magnitude = builder.get_parameter(node.magnitude.name) - phase = builder.get_parameter(node.phase.name) - amplitude = builder.create_element(node.amplitude) - return _CoefficientAmplitude(amplitude, magnitude, phase, node.prefactor) - - -class _SequentialAmplitude: - def __init__(self, amplitudes: List[Callable]) -> None: - self._seq_amps = amplitudes - - def __call__(self, dataset: dict) -> tf.Tensor: - seq_amp = atfi.complex(atfi.const(1.0), atfi.const(0.0)) - for amp in self._seq_amps: - seq_amp = seq_amp * amp(dataset) - return seq_amp - - -def _create_sequential_amplitude( - builder: IntensityBuilder, node: es.Node, **_: dict -) -> Callable: - if not isinstance(node, es.SequentialAmplitude): - raise TypeError - if len(node.amplitudes) == 0: - raise Exception( - "Sequential Amplitude requires a non-empty list of amplitudes!" - ) - return _SequentialAmplitude( - [builder.create_element(x) for x in node.amplitudes] - ) - - -class _AngularProperties(NamedTuple): - j: float - m: float - mprime: float - theta_name: str - phi_name: str - - -class DynamicsProperties(NamedTuple): - """Data structure representing dynamic properties.""" - - orbit_angular_momentum: float - resonance_mass: float - resonance_width: float - inv_mass_name: str - inv_mass_name_prod1: str - inv_mass_name_prod2: str - meson_radius: Optional[float] - - -class _RelativisticBreitWigner: - def __init__( - self, - dynamics_props: DynamicsProperties, - form_factor: Optional[es.FormFactor] = None, - ) -> None: - self._dynamics_props = dynamics_props - self._call_wrapper = self._without_form_factor - if isinstance(form_factor, es.BlattWeisskopf): - self._call_wrapper = self._with_form_factor - - def __call__(self, dataset: dict) -> tf.Tensor: - return self._call_wrapper(dataset) - - def _without_form_factor(self, dataset: dict) -> tf.Tensor: - mass0 = self._dynamics_props.resonance_mass - gamma0 = self._dynamics_props.resonance_width - return ( - relativistic_breit_wigner( - dataset[self._dynamics_props.inv_mass_name], - self._dynamics_props.resonance_mass, - self._dynamics_props.resonance_width, - ) - * atfi.complex(mass0 * gamma0, atfi.const(0.0)) - ) - - def _with_form_factor(self, dataset: dict) -> tf.Tensor: - inv_mass_squared = dataset[self._dynamics_props.inv_mass_name] - inv_mass = atfi.sqrt(inv_mass_squared) - mass0 = self._dynamics_props.resonance_mass - gamma0 = self._dynamics_props.resonance_width - m_a = atfi.sqrt(dataset[self._dynamics_props.inv_mass_name_prod1]) - m_b = atfi.sqrt(dataset[self._dynamics_props.inv_mass_name_prod2]) - meson_radius = self._dynamics_props.meson_radius - l_orbit = self._dynamics_props.orbit_angular_momentum - q_squared = two_body_momentum_squared(inv_mass, m_a, m_b) - q0_squared = two_body_momentum_squared(mass0, m_a, m_b) - ff2 = blatt_weisskopf_ff_squared(q_squared, meson_radius, l_orbit) - ff02 = blatt_weisskopf_ff_squared(q0_squared, meson_radius, l_orbit) - width = gamma0 * (mass0 / inv_mass) * (ff2 / ff02) - # So far its all in float64, - # but for the sqrt operation it has to be converted to complex - width = atfi.complex( - width, tf.constant(0.0, dtype=tf.float64) - ) * atfi.sqrt( - atfi.complex( - (q_squared / q0_squared), - tf.constant(0.0, dtype=tf.float64), - ) - ) - return relativistic_breit_wigner( - inv_mass_squared, mass0, width - ) * atfi.complex(mass0 * gamma0 * atfi.sqrt(ff2), atfi.const(0.0)) - - -class _NonDynamic: - def __init__( - 
self, - dynamics_props: DynamicsProperties, - form_factor: Optional[es.FormFactor] = None, - ) -> None: - self._dynamics_props = dynamics_props - self._call_wrapper: Callable[ - [dict], tf.Tensor - ] = self._without_form_factor - if isinstance(form_factor, es.BlattWeisskopf): - self._call_wrapper = self._with_form_factor - - def __call__(self, dataset: dict) -> tf.Tensor: - return self._call_wrapper(dataset) - - @staticmethod - def _without_form_factor(_: dict) -> tf.Tensor: - return tf.complex( - tf.constant(1.0, dtype=tf.float64), - tf.constant(0.0, dtype=tf.float64), - ) - - def _with_form_factor(self, dataset: dict) -> tf.Tensor: - inv_mass = atfi.sqrt(dataset[self._dynamics_props.inv_mass_name]) - m_a = atfi.sqrt(dataset[self._dynamics_props.inv_mass_name_prod1]) - m_b = atfi.sqrt(dataset[self._dynamics_props.inv_mass_name_prod2]) - meson_radius = self._dynamics_props.meson_radius - l_orbit = self._dynamics_props.orbit_angular_momentum - - q_squared = two_body_momentum_squared(inv_mass, m_a, m_b) - - return atfi.complex( - atfi.sqrt( - blatt_weisskopf_ff_squared(q_squared, meson_radius, l_orbit) - ), - atfi.const(0.0), - ) - - -class _HelicityDecay: - def __init__( - self, - angular_params: "_AngularProperties", - dynamics_function: Callable, - prefactor: float = 1.0, - ) -> None: - self._params = angular_params - self._dynamics_function = dynamics_function - self._prefactor = prefactor - - def __call__(self, dataset: dict) -> tf.Tensor: - return ( - self._prefactor - * wigner_capital_d( - dataset[self._params.phi_name], - dataset[self._params.theta_name], - 0.0, - int(2 * self._params.j), - int(2 * self._params.m), - int(2 * self._params.mprime), - ) - * self._dynamics_function(dataset) - ) - - -def _clebsch_gordan_coefficient(clebsch_gordan: es.ClebschGordan) -> float: - return ( - CG( - j1=clebsch_gordan.j_1, - m1=clebsch_gordan.m_1, - j2=clebsch_gordan.j_2, - m2=clebsch_gordan.m_2, - j3=clebsch_gordan.J, - m3=clebsch_gordan.M, - ) - .doit() - .evalf() - ) - - -def _determine_canonical_prefactor(node: es.CanonicalDecay) -> float: - l_s = _clebsch_gordan_coefficient(node.l_s) - s2s3 = _clebsch_gordan_coefficient(node.s2s3) - return float(l_s * s2s3) - - -def _create_helicity_decay( # pylint: disable=too-many-locals - builder: IntensityBuilder, - node: es.Node, - kinematics: HelicityKinematics, -) -> Callable: - if not isinstance(node, (es.HelicityDecay, es.CanonicalDecay)): - raise TypeError - decaying_state = node.decaying_particle - decay_products = node.decay_products - dec_prod_fs_ids = [x.final_state_ids for x in decay_products] - - recoil_final_state = [] - parent_recoil_final_state = [] - recoil_system = node.recoil_system - if recoil_system is not None: - recoil_final_state = recoil_system.recoil_final_state - if recoil_system.parent_recoil_final_state is not None: - parent_recoil_final_state = recoil_system.parent_recoil_final_state - - inv_mass_name, theta_name, phi_name = kinematics.register_subsystem( - SubSystem( - dec_prod_fs_ids, - recoil_final_state, - parent_recoil_final_state, - ) - ) - - particle = decaying_state.particle - j = particle.spin - - prefactor = 1.0 - if isinstance(node, es.CanonicalDecay): - prefactor = _determine_canonical_prefactor(node) - - dynamics = _create_dynamics( - builder, - node, - dec_prod_fs_ids, - decaying_state, - inv_mass_name, - kinematics, - ) - - return _HelicityDecay( - _AngularProperties( - j=j, - m=decaying_state.helicity, - mprime=decay_products[0].helicity - decay_products[1].helicity, - theta_name=theta_name, - 
phi_name=phi_name, - ), - dynamics, - prefactor=prefactor, - ) - - -def _create_dynamics( - builder: IntensityBuilder, - amplitude_node: es.AmplitudeNode, - dec_prod_fs_ids: Sequence, - decaying_state: es.HelicityParticle, - inv_mass_name: str, - kinematics: HelicityKinematics, -) -> Callable: - particle = decaying_state.particle - orbit_angular_momentum = particle.spin - if isinstance(amplitude_node, es.CanonicalDecay): - orbit_angular_momentum = amplitude_node.l_s.j_1 - - dynamics = builder.create_dynamics( - particle, - DynamicsProperties( - orbit_angular_momentum=orbit_angular_momentum, - resonance_mass=builder.register_parameter( - f"Position_{particle.name}", - particle.mass, - ), - resonance_width=builder.register_parameter( - f"Width_{particle.name}", - particle.width, - ), - inv_mass_name=inv_mass_name, - inv_mass_name_prod1=kinematics.register_invariant_mass( - dec_prod_fs_ids[0] - ), - inv_mass_name_prod2=kinematics.register_invariant_mass( - dec_prod_fs_ids[1] - ), - meson_radius=None, - ), - ) - return dynamics diff --git a/src/tensorwaves/physics/helicity_formalism/kinematics.py b/src/tensorwaves/physics/helicity_formalism/kinematics.py deleted file mode 100644 index 01ed5d14..00000000 --- a/src/tensorwaves/physics/helicity_formalism/kinematics.py +++ /dev/null @@ -1,382 +0,0 @@ -r"""Kinematic based calculations for the helicity formalism. - -It's responsibilities are defined by the interface -:class:`.interfaces.Kinematics`. - -Here, the main responsibility is the conversion of general kinematic -information of a reaction to helicity formalism specific quantities - -:math:`(s, \theta, \phi)` - -The basic building blocks are the :class:`~HelicityKinematics` and -:class:`~SubSystem`. -""" -import logging -from collections import abc -from typing import Dict, List, Optional, Sequence, Tuple - -import amplitf.kinematics as tfa_kin -import numpy as np -from expertsystem.amplitude.model import AmplitudeModel -from expertsystem.particle import ParticleCollection - -from tensorwaves.interfaces import Kinematics - - -class ParticleReactionKinematicsInfo: - r"""Contains boundary condition information of a particle reaction. - - Args: - initial_state_names: Defines the initial state - final_state_names: Defines the final state - particle_dict: Contains particle information - total_invariant_mass: Invariant mass :math:`\sqrt(s)` of the initial or - final state. Has to be specified for a multi particle initial state. - - fs_id_event_pos_mapping: Mapping between particle IDs and their - positions in an event collection. - - """ - - def __init__( - self, - initial_state_names: List[str], - final_state_names: List[str], - particles: ParticleCollection, - total_invariant_mass: Optional[float] = None, - fs_id_event_pos_mapping: Optional[Dict[int, int]] = None, - ): - if isinstance(initial_state_names, str): - initial_state_names = (initial_state_names,) - if len(initial_state_names) == 0: - raise ValueError("initial_state_names cannot be empty!") - if len(final_state_names) == 0: - raise ValueError("final_state_names cannot be empty!") - - self._initial_state_particles = [ - particles[x] for x in initial_state_names - ] - self._final_state_particles = [particles[x] for x in final_state_names] - - if len(self._initial_state_particles) == 1: - if total_invariant_mass: - logging.warning( - "Total invariant mass sqrt(s) given with a single particle" - " initial state! Using given sqrt(s)!" 
- ) - else: - mass = self._initial_state_particles[0].mass - self._total_invariant_mass = mass - else: - if not total_invariant_mass: - raise ValueError("Total invariant mass sqrt(s) not given!") - self._total_invariant_mass = total_invariant_mass - - self._fs_id_event_pos_mapping = fs_id_event_pos_mapping - - @classmethod - def from_model( - cls, model: AmplitudeModel - ) -> "ParticleReactionKinematicsInfo": - """Initialize from a recipe dictionary.""" - particles = model.particles - fi_state = model.kinematics.final_state - in_state = model.kinematics.initial_state - fs_id_event_pos_mapping = { - state_id: pos for pos, state_id in enumerate(fi_state) - } - return cls( - initial_state_names=[p.name for p in in_state.values()], - final_state_names=[p.name for p in fi_state.values()], - particles=particles, - fs_id_event_pos_mapping=fs_id_event_pos_mapping, - ) - - @property - def initial_state_masses(self) -> List[float]: - return [p.mass for p in self._initial_state_particles] - - @property - def final_state_masses(self) -> List[float]: - return [p.mass for p in self._final_state_particles] - - @property - def total_invariant_mass(self) -> float: - return self._total_invariant_mass - - @property - def fs_id_event_pos_mapping(self) -> Optional[Dict[int, int]]: - return self._fs_id_event_pos_mapping - - -class SubSystem(abc.Hashable): - """Represents a part of a decay chain. - - A SubSystem resembles a decaying state and its ingoing and outgoing state. - It is uniquely defined by: - - * :attr:`final_states` - * :attr:`recoil_state` - * :attr:`parent_recoil_state` - """ - - def __init__( - self, - final_states: Sequence[Sequence[int]], - recoil_state: Sequence[int], - parent_recoil_state: Sequence[int], - ) -> None: - """Fully initialize the :class:`SubSystem`. - - Args: - final_states: `tuple` of `tuple` s containing unique ids. - Represents the final state content of the decay products. - recoil_state: `tuple` of unique ids representing the recoil partner - of the decaying state. - parent_recoil_state: `tuple` of unique ids representing the recoil - partner of the parent state. - - """ - self._final_states = tuple(tuple(x) for x in final_states) - self._recoil_state = tuple(recoil_state) - self._parent_recoil_state = tuple(parent_recoil_state) - - @property - def final_states(self) -> Tuple[tuple, ...]: - """Get final state content of the decay products.""" - return self._final_states - - @property - def recoil_state(self) -> tuple: - """Get final state content of the recoil partner.""" - return self._recoil_state - - @property - def parent_recoil_state(self) -> tuple: - """Get final state content of the recoil partner of the parent.""" - return self._parent_recoil_state - - def __eq__(self, other: object) -> bool: - """Equal testing operator.""" - if not isinstance(other, SubSystem): - raise NotImplementedError - if self._final_states != other._final_states: - return False - if self._recoil_state != other._recoil_state: - return False - if self._parent_recoil_state != other._parent_recoil_state: - return False - return True - - def __hash__(self) -> int: - """Hash function to use SubSystem as key.""" - return hash( - (self._final_states, self._recoil_state, self._parent_recoil_state) - ) - - -class HelicityKinematics(Kinematics): - """Kinematics of the helicity formalism. - - General usage is - - 1. Register kinematic variables via the three methods - (:meth:`register_invariant_mass`, :meth:`register_helicity_angles`, - :meth:`register_subsystem`) first. - 2. 
Then convert events to these kinematic variables. - - For additional functionality check :meth:`phase_space_volume` and - :meth:`is_within_phase_space`. - """ - - def __init__(self, reaction_info: ParticleReactionKinematicsInfo): - """Initialize the a blank HelicityKinematics. - - Args: - reaction_info: data structure that contains all of the kinematic - information of the particle reaction. - - """ - self._reaction_info = reaction_info - self._registered_inv_masses: Dict[Tuple, str] = dict() - self._registered_subsystems: Dict[SubSystem, Tuple[str, str]] = dict() - - @classmethod - def from_model(cls, model: AmplitudeModel) -> "HelicityKinematics": - return cls(ParticleReactionKinematicsInfo.from_model(model)) - - @property - def reaction_kinematics_info(self) -> ParticleReactionKinematicsInfo: - return self._reaction_info - - @property - def phase_space_volume(self) -> float: - return 1.0 - - def is_within_phase_space(self, events: np.ndarray) -> Tuple[bool]: - """Check whether events lie within the phase space definition.""" - raise NotImplementedError - - def register_invariant_mass(self, final_state: Sequence) -> str: - """Register an invariant mass :math:`s`. - - Args: - final_state: collection of particle unique id's - - Return: - A `str` key representing the invariant mass. It can be used to - retrieve this invariant mass from the dataset returned by - :meth:`~convert`. - - """ - logging.debug("registering inv mass in kinematics") - _final_state: tuple = tuple(sorted(final_state)) - if _final_state not in self._registered_inv_masses: - label = "mSq_" - for particle_uid in _final_state: - label += str(particle_uid) + "+" - label = label[:-1] - self._registered_inv_masses[_final_state] = label - return self._registered_inv_masses[_final_state] - - def register_helicity_angles( - self, subsystem: SubSystem - ) -> Tuple[str, str]: - r"""Register helicity angles :math:`(\theta, \phi)` of a `SubSystem`. - - Args: - subsystem: SubSystem to which the registered angles correspond. - - Return: - A pair of `str` keys representing the angles. They can be used to - retrieve the angles from the dataset returned by :meth:`~convert`. - - """ - logging.debug("registering helicity angles in kinematics") - if subsystem not in self._registered_subsystems: - suffix = "" - for final_state in subsystem.final_states: - suffix += "_" - for particle_uid in final_state: - suffix += str(particle_uid) + "+" - suffix = suffix[:-1] - if subsystem.recoil_state: - suffix += "_vs_" - for particle_uid in subsystem.recoil_state: - suffix += str(particle_uid) + "+" - suffix = suffix[:-1] - - self._registered_subsystems[subsystem] = ( - "theta" + suffix, - "phi" + suffix, - ) - return self._registered_subsystems[subsystem] - - def register_subsystem(self, subsystem: SubSystem) -> Tuple[str, ...]: - r"""Register all kinematic variables of the :class:`~SubSystem`. - - Args: - subsystem: SubSystem to which the registered kinematic variables - correspond. - - Return: - A tuple of `str` keys representing the :math:`(s, \theta, \phi)`. - They can be used to retrieve the kinematic data from the dataset - returned by :meth:`~convert`. 
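For the record, the registration methods of this removed class produced variable names like the following. The sketch assumes `kinematics` is an instance of the deleted `HelicityKinematics` above:

```python
subsystem = SubSystem(
    final_states=[[3], [4]],  # decay products of the decaying state
    recoil_state=[2],  # recoil partner of the decaying state
    parent_recoil_state=[],  # no parent recoil
)
inv_mass, theta, phi = kinematics.register_subsystem(subsystem)
# inv_mass == "mSq_3+4", theta == "theta_3_4_vs_2", phi == "phi_3_4_vs_2"
```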
- - """ - state_fs: list = [] - for fs_uid in subsystem.final_states: - state_fs += fs_uid - invmass_name = self.register_invariant_mass(list(set(state_fs))) - angle_names = self.register_helicity_angles(subsystem) - - return (invmass_name,) + angle_names - - def _convert_ids_to_indices(self, ids: Tuple[int, ...]) -> Tuple[int, ...]: - if self._reaction_info.fs_id_event_pos_mapping: - return tuple( - self._reaction_info.fs_id_event_pos_mapping[i] for i in ids - ) - return ids - - def convert(self, events: np.ndarray) -> dict: - r"""Convert events to the registered kinematics variables. - - Args: - events: A three dimensional numpy array of the shape - :math:`(n_{\mathrm{part}}, n_{\mathrm{events}}, 4)`. - - * :math:`n_{\mathrm{part}}` is the number of particles - * :math:`n_{\mathrm{events}}` is the number of events - - The third dimension correspond to the four momentum info - :math:`(p_x, p_y, p_z, E)`. - - Return: - A `dict` containing the registered kinematic variables as keys - and their corresponding values. This is also known as a dataset. - - """ - logging.debug("converting %s events", len(events[0])) - - dataset = {} - - for ( - four_momenta_ids, - inv_mass_name, - ) in self._registered_inv_masses.items(): - if len(four_momenta_ids) == 1: - index = self._convert_ids_to_indices(four_momenta_ids)[0] - - dataset[inv_mass_name] = np.square( - np.array(self._reaction_info.final_state_masses[index]) - ) - - else: - four_momenta = np.sum( - events[self._convert_ids_to_indices(four_momenta_ids), :], - axis=0, - ) - - dataset[inv_mass_name] = tfa_kin.mass_squared( - np.array(four_momenta) - ).numpy() - - for subsys, angle_names in self._registered_subsystems.items(): - topology = [ - np.sum(events[self._convert_ids_to_indices(x), :], axis=0) - for x in subsys.final_states - ] - if subsys.recoil_state: - topology = [ - topology, - np.sum( - events[ - self._convert_ids_to_indices(subsys.recoil_state), - :, - ], - axis=0, - ), - ] - if subsys.parent_recoil_state: - topology = [ - topology, - np.sum( - events[ - self._convert_ids_to_indices( - subsys.parent_recoil_state - ), - :, - ], - axis=0, - ), - ] - - values = tfa_kin.nested_helicity_angles(topology) - - # the last two angles is always what we are interested - dataset[angle_names[0]] = values[-2].numpy() - dataset[angle_names[1]] = values[-1].numpy() - - return dataset diff --git a/tests/conftest.py b/tests/conftest.py index 4c807090..75372254 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,29 +1,27 @@ # pylint: disable=redefined-outer-name -from copy import deepcopy +from typing import Any, Dict import expertsystem as es -import numpy as np import pytest -from expertsystem.amplitude.model import AmplitudeModel +from expertsystem.amplitude.data import DataSet, MomentumPool +from expertsystem.amplitude.dynamics.builder import ( + create_relativistic_breit_wigner_with_ff, +) +from expertsystem.amplitude.helicity import HelicityModel +from expertsystem.amplitude.kinematics import HelicityKinematics from expertsystem.particle import ParticleCollection from tensorwaves.data.generate import generate_data, generate_phsp from tensorwaves.data.tf_phasespace import TFUniformRealNumberGenerator -from tensorwaves.estimator import UnbinnedNLL +from tensorwaves.estimator import SympyUnbinnedNLL from tensorwaves.optimizer.callbacks import ( CallbackList, CSVSummary, YAMLSummary, ) from tensorwaves.optimizer.minuit import Minuit2 -from tensorwaves.physics.helicity_formalism.amplitude import ( - IntensityBuilder, - IntensityTF, -) -from 
tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, -) +from tensorwaves.physics.amplitude import SympyModel N_PHSP_EVENTS = int(1e5) N_DATA_EVENTS = int(1e4) @@ -41,79 +39,85 @@ def output_dir(pytestconfig) -> str: @pytest.fixture(scope="session") -def helicity_model() -> AmplitudeModel: - return __create_model(formalism="helicity") +def helicity_model() -> SympyModel: + model = __create_model(formalism="helicity") + return SympyModel( + expression=model.expression, + parameters=model.parameters, + ) @pytest.fixture(scope="session") -def canonical_model() -> AmplitudeModel: - return __create_model(formalism="canonical-helicity") +def canonical_model() -> SympyModel: + model = __create_model(formalism="canonical-helicity") + return SympyModel( + expression=model.expression, + parameters=model.parameters, + ) @pytest.fixture(scope="session") -def kinematics(helicity_model: AmplitudeModel) -> HelicityKinematics: - return HelicityKinematics.from_model(helicity_model) +def kinematics() -> HelicityKinematics: + model = __create_model(formalism="helicity") + return model.kinematics @pytest.fixture(scope="session") -def phsp_sample(kinematics: HelicityKinematics) -> np.ndarray: +def phsp_sample(kinematics: HelicityKinematics) -> MomentumPool: return generate_phsp(N_PHSP_EVENTS, kinematics, random_generator=RNG) @pytest.fixture(scope="session") -def phsp_set(kinematics: HelicityKinematics, phsp_sample: np.ndarray) -> dict: +def phsp_set( + kinematics: HelicityKinematics, phsp_sample: MomentumPool +) -> DataSet: return kinematics.convert(phsp_sample) -@pytest.fixture(scope="session") -def intensity( - helicity_model: AmplitudeModel, - kinematics: HelicityKinematics, - phsp_sample: np.ndarray, -) -> IntensityTF: - # https://github.com/ComPWA/tensorwaves/issues/171 - model = deepcopy(helicity_model) - builder = IntensityBuilder(model.particles, kinematics, phsp_sample) - return builder.create_intensity(model) - - @pytest.fixture(scope="session") def data_sample( kinematics: HelicityKinematics, - intensity: IntensityTF, -) -> np.ndarray: + helicity_model: SympyModel, +) -> MomentumPool: + callable_model = helicity_model.lambdify(backend="numpy") return generate_data( - N_DATA_EVENTS, kinematics, intensity, random_generator=RNG + N_DATA_EVENTS, kinematics, callable_model, random_generator=RNG ) @pytest.fixture(scope="session") def data_set( kinematics: HelicityKinematics, - data_sample: np.ndarray, -) -> dict: + data_sample: MomentumPool, +) -> DataSet: return kinematics.convert(data_sample) @pytest.fixture(scope="session") def estimator( - intensity: IntensityTF, data_set: dict, phsp_set: dict -) -> UnbinnedNLL: - return UnbinnedNLL(intensity, data_set, phsp_set) + helicity_model: SympyModel, data_set: DataSet, phsp_set: DataSet +) -> SympyUnbinnedNLL: + return SympyUnbinnedNLL( + helicity_model, + data_set, + phsp_set, + ) @pytest.fixture(scope="session") -def free_parameters() -> dict: +def free_parameters() -> Dict[str, float]: return { - "Width_f(0)(500)": 0.3, - "Position_f(0)(980)": 1, + "Gamma_f(0)(500)": 0.3, + "m_f(0)(980)": 1, } @pytest.fixture(scope="session") def fit_result( - estimator: UnbinnedNLL, free_parameters: dict, output_dir: str -) -> dict: + estimator: SympyUnbinnedNLL, + free_parameters: Dict[str, float], + output_dir: str, +) -> Dict[str, Any]: optimizer = Minuit2( callback=CallbackList( [ @@ -125,7 +129,7 @@ def fit_result( return optimizer.optimize(estimator, free_parameters) -def __create_model(formalism: str) -> AmplitudeModel: +def 
__create_model(formalism: str) -> HelicityModel: result = es.generate_transitions( initial_state=("J/psi(1S)", [-1, +1]), final_state=["gamma", "pi0", "pi0"], @@ -138,7 +142,9 @@ def __create_model(formalism: str) -> AmplitudeModel: allowed_interaction_types=["EM", "strong"], number_of_threads=1, ) - model = es.generate_amplitudes(result) + model_builder = es.amplitude.get_builder(result) for name in result.get_intermediate_particles().names: - model.dynamics.set_breit_wigner(name) - return model + model_builder.set_dynamics( + name, create_relativistic_breit_wigner_with_ff + ) + return model_builder.generate() diff --git a/tests/data/test_generate.py b/tests/data/test_generate.py index 02e0b7f4..8cb75369 100644 --- a/tests/data/test_generate.py +++ b/tests/data/test_generate.py @@ -1,138 +1,160 @@ # cspell:ignore tolist from pprint import pprint +from typing import Sequence import numpy as np import pytest +from expertsystem.amplitude.data import MomentumPool +from expertsystem.amplitude.kinematics import HelicityKinematics, ReactionInfo +from expertsystem.particle import ParticleCollection from tensorwaves.data.generate import generate_phsp from tensorwaves.data.tf_phasespace import TFUniformRealNumberGenerator -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, - ParticleReactionKinematicsInfo, -) -def test_generate_data(data_sample: np.ndarray): - sub_sample = data_sample[:, :5, :] - print("Expected list, get by running pytest with the -s flag") - pprint(np.round(sub_sample, decimals=11).tolist()) - assert pytest.approx(sub_sample) == [ - [ - [-1.11655564285, -0.65098757279, -0.29889930779, 1.32658287329], - [0.6614259842, -0.67081160216, 0.91253074716, 1.31155819645], - [-0.89388082549, 0.0142008738, 0.7940946521, 1.19574700982], - [1.09520656083, -0.40782669601, -0.71221082709, 1.36859208207], - [-0.91362335417, 0.65774093748, -0.03330470173, 1.12625040612], +def test_generate_data(data_sample: MomentumPool): + sub_sample = { + i: four_momenta[:5, :] for i, four_momenta in data_sample.items() + } + print("Expected shape, get by running pytest with the -s flag") + pprint( + { + i: np.round(four_momenta, decimals=11).tolist() + for i, four_momenta in sub_sample.items() + } + ) + expected = { + 0: [ + [1.50757377596, 0.37918944935, 0.73396599969, 1.26106620078], + [1.41389525301, -0.07315064441, -0.21998573758, 1.39475985207], + [1.52128570461, 0.06569896528, -1.51812710851, 0.0726906006], + [1.51480310845, 1.40672331053, 0.49678572189, -0.26260603856], + [1.52384281483, 0.79694939592, 1.29832389761, -0.03638188481], ], - [ - [0.99646172468, 0.95461505668, 0.29199575453, 1.4169354722], - [-0.12901200642, 0.8756471174, -0.77276118186, 1.18270053384], - [-0.04750914038, 0.01974185344, -0.92886918322, 0.94003380539], - [-0.67129360197, 0.03043449972, 0.88545452402, 1.11973649108], - [0.68138748966, -1.08237744503, -0.3566346599, 1.33462985945], + 1: [ + [1.42066087326, -0.34871369761, -0.72119471428, -1.1654765212], + [0.96610319301, -0.26739932067, -0.15455480956, -0.90539883872], + [0.60647770024, 0.11616448713, 0.57584161239, -0.06714695611], + [1.01045883083, -0.88651015826, -0.46024226278, 0.0713099651], + [1.04324742713, -0.48051670276, -0.91259832182, -0.08009031815], ], - [ - [0.12009391817, -0.3036274839, 0.00690355325, 0.35338165451], - [-0.53241397778, -0.20483551524, -0.1397695653, 0.60264126971], - [0.94138996588, -0.03394272724, 0.13477453112, 0.96111918479], - [-0.42391295886, 0.37739219629, -0.17324369693, 0.60857142685], - [0.2322358645, 
0.42463650754, 0.38993936163, 0.63601973443], + 2: [ + [0.16866535079, -0.03047575173, -0.01277128542, -0.09558967958], + [0.71690155399, 0.34054996508, 0.37454054715, -0.48936101336], + [0.96913659515, -0.18186345241, 0.94228549612, -0.00554364449], + [0.57163806072, -0.52021315227, -0.03654345912, 0.19129607347], + [0.52980975805, -0.31643269316, -0.38572557579, 0.11647220296], ], - ] + } + assert set(sub_sample) == set(expected) + for i in sub_sample: + assert pytest.approx(sub_sample[i]) == expected[i] @pytest.mark.parametrize( - "initial_state_names, final_state_names, expected_sample", + "initial_state_name, final_state_names, expected_sample", [ ( "J/psi(1S)", ("pi0", "pi0", "pi0"), - [ - [ - [0.799667989, 0.159823862, 0.156340839, 0.841233472], - [-0.364360112, -0.371962329, 0.347228344, 0.640234742], - [0.403805561, 0.417294074, -0.208401449, 0.631540320], - ], - [ - [-0.053789754, -0.535237707, -0.947232044, 1.097652050], - [1.168326711, -0.060296302, -0.805136016, 1.426564296], - [0.014812643, 0.081738919, 1.233338364, 1.243480165], - ], - [ - [-0.745878234, 0.375413844, 0.790891204, 1.158014477], - [-0.803966599, 0.432258632, 0.457907671, 1.030100961], - [-0.418618204, -0.499032994, -1.024936914, 1.221879513], - ], - ], + MomentumPool( + { + 0: [ + [0.841233472, 0.799667989, 0.159823862, 0.156340839], + [0.640234742, -0.364360112, -0.371962329, 0.347228344], + [0.631540320, 0.403805561, 0.417294074, -0.208401449], + ], + 1: [ + [1.09765205, -0.05378975, -0.53523771, -0.94723204], + [1.426564296, 1.168326711, -0.060296302, -0.805136016], + [1.243480165, 0.014812643, 0.081738919, 1.233338364], + ], + 2: [ + [1.158014477, -0.745878234, 0.375413844, 0.790891204], + [1.030100961, -0.803966599, 0.432258632, 0.457907671], + [1.22187951, -0.41861820, -0.49903210, -1.02493691], + ], + } + ), ), ( - ("J/psi(1S)"), + "J/psi(1S)", ("pi0", "pi0", "pi0", "gamma"), - [ - [ - [0.037458949, 0.339629143, -0.369297399, 0.520913076], - [-0.569078090, 0.687702756, -0.760836072, 1.180624927], - [0.543652274, 0.220242315, -0.077206475, 0.606831154], - ], - [ - [0.130561009, 0.299006221, -0.012444727, 0.353305116], - [0.123009165, 0.057692537, 0.033979586, 0.194507152], - [0.224048290, -0.156048645, 0.130817046, 0.331482507], - ], - [ - [0.236609937, -0.366594420, 1.192296945, 1.276779728], - [0.571746863, -0.586304492, 1.051145223, 1.339317905], - [0.402982692, -0.697161285, 0.083274400, 0.820720580], - ], - [ - [-0.404629896, -0.272040943, -0.810554818, 0.945902078], - [-0.125677938, -0.159090801, -0.324288738, 0.382450013], - [-1.170683257, 0.632967615, -0.136884971, 1.337865758], - ], - ], + MomentumPool( + { + 0: [ + [0.520913076, 0.037458949, 0.339629143, -0.369297399], + [1.180624927, -0.569078090, 0.687702756, -0.760836072], + [0.606831154, 0.543652274, 0.220242315, -0.077206475], + ], + 1: [ + [0.353305116, 0.130561009, 0.299006221, -0.012444727], + [0.194507152, 0.123009165, 0.057692537, 0.033979586], + [0.331482507, 0.224048290, -0.156048645, 0.130817046], + ], + 2: [ + [1.276779728, 0.236609937, -0.366594420, 1.192296945], + [1.339317905, 0.571746863, -0.586304492, 1.051145223], + [0.820720580, 0.402982692, -0.697161285, 0.083274400], + ], + 3: [ + [0.945902080, -0.40462990, -0.27204094, -0.81055482], + [0.38245001, -0.12567794, -0.15909080, -0.32428874], + [1.337865758, -1.170683257, 0.632967615, -0.136884971], + ], + } + ), ), ( "J/psi(1S)", ("pi0", "pi0", "pi0", "pi0", "gamma"), - [ - [ - [0.715439409, -0.284844373, -0.623772405, 1.000150296], - [0.134562969, 0.189723778, 0.229578969, 
0.353592342], - [0.655088513, -0.205095150, -0.222905673, 0.734241552], - ], - [ - [-0.062423993, 0.008278542, -0.516645045, 0.537685901], - [-0.075102421, -0.215361523, 0.351626927, 0.440319420], - [-0.569846157, -0.063070826, 0.199036046, 0.621720722], - ], - [ - [-0.190428491, -0.002167052, 0.540188288, 0.588463958], - [-0.114856586, -0.554777459, -0.515051054, 0.777474366], - [-0.120958419, 0.236101553, -0.455239823, 0.543908922], - ], - [ - [-0.286712460, -0.089479316, 0.393698133, 0.513251926], - [0.536198573, -0.215753382, -0.007385008, 0.593575359], - [-0.442948181, -0.261969339, 0.187557768, 0.564116725], - ], - [ - [-0.175874464, 0.368212199, 0.206531028, 0.457347916], - [-0.480802535, 0.796168585, -0.058769834, 0.931938511], - [0.478664245, 0.294033763, 0.291551681, 0.632912076], - ], - ], + MomentumPool( + { + 0: [ + [1.000150296, 0.715439409, -0.284844373, -0.623772405], + [0.353592342, 0.134562969, 0.189723778, 0.229578969], + [0.734241552, 0.655088513, -0.205095150, -0.222905673], + ], + 1: [ + [0.537685901, -0.062423993, 0.008278542, -0.516645045], + [0.440319420, -0.075102421, -0.215361523, 0.351626927], + [0.621720722, -0.569846157, -0.063070826, 0.199036046], + ], + 2: [ + [0.588463958, -0.190428491, -0.002167052, 0.540188288], + [0.77747437, -0.11485659, -0.55477746, -0.51505105], + [0.543908922, -0.120958419, 0.236101553, -0.455239823], + ], + 3: [ + [0.513251926, -0.286712460, -0.089479316, 0.393698133], + [0.593575359, 0.536198573, -0.215753382, -0.007385008], + [0.564116725, -0.442948181, -0.261969339, 0.187557768], + ], + 4: [ + [0.457347916, -0.175874464, 0.368212199, 0.206531028], + [0.931938511, -0.480802535, 0.796168585, -0.058769834], + [0.632912076, 0.478664245, 0.294033763, 0.291551681], + ], + } + ), ), ], ) def test_generate_phsp( - initial_state_names, final_state_names, expected_sample, pdg + initial_state_name: str, + final_state_names: Sequence[str], + expected_sample: MomentumPool, + pdg: ParticleCollection, ): - reaction_info = ParticleReactionKinematicsInfo( - initial_state_names, final_state_names, pdg + reaction_info = ReactionInfo( + initial_state={-1: pdg[initial_state_name]}, + final_state={i: pdg[name] for i, name in enumerate(final_state_names)}, ) kin = HelicityKinematics(reaction_info) sample_size = 3 rng = TFUniformRealNumberGenerator(seed=0) sample = generate_phsp(sample_size, kin, random_generator=rng) - assert sample.shape == (len(final_state_names), sample_size, 4) + assert len(sample) == len(final_state_names) + for four_momenta in sample.values(): + assert len(four_momenta) == sample_size assert pytest.approx(sample, abs=1e-8) == expected_sample diff --git a/tests/data/test_tf_phasespace.py b/tests/data/test_tf_phasespace.py index c39030a5..a52fc950 100644 --- a/tests/data/test_tf_phasespace.py +++ b/tests/data/test_tf_phasespace.py @@ -1,12 +1,10 @@ import pytest +from expertsystem.amplitude.kinematics import ReactionInfo from tensorwaves.data.tf_phasespace import ( TFPhaseSpaceGenerator, TFUniformRealNumberGenerator, ) -from tensorwaves.physics.helicity_formalism.kinematics import ( - ParticleReactionKinematicsInfo, -) class TestTFPhaseSpaceGenerator: @@ -15,16 +13,27 @@ def test_generate_deterministic(pdg): sample_size = 5 initial_state_names = ["J/psi(1S)"] final_state_names = ["K0", "Sigma+", "p~"] - reaction_info = ParticleReactionKinematicsInfo( - initial_state_names=initial_state_names, - final_state_names=final_state_names, - particles=pdg, + reaction_info = ReactionInfo( + initial_state={ + i: pdg[name] + for i, name in 
zip( + range(-len(initial_state_names) - 1, 0), + initial_state_names, + ) + }, + final_state={ + i: pdg[name] + for i, name in zip( + range(-len(final_state_names) - 1, 0), final_state_names + ) + }, ) rng = TFUniformRealNumberGenerator(seed=123) phsp_generator = TFPhaseSpaceGenerator(reaction_info) four_momenta, weights = phsp_generator.generate(sample_size, rng) - assert four_momenta.shape == (len(final_state_names), sample_size, 4) - assert weights.shape == (sample_size,) + for values in four_momenta.values(): + assert len(values) == sample_size + assert len(weights) == sample_size assert pytest.approx(four_momenta, abs=1e-6) == [ [ [0.357209, 0.251997, 0.244128, 0.705915], diff --git a/tests/optimizer/test_gradient.py b/tests/optimizer/test_gradient.py new file mode 100644 index 00000000..1076aa44 --- /dev/null +++ b/tests/optimizer/test_gradient.py @@ -0,0 +1,122 @@ +from itertools import product +from typing import Any, Callable, Dict, List + +import numpy as np +import pytest + +from tensorwaves.estimator import gradient_creator + + +class Function1D: + def __init__(self, a: float, b: float, c: float) -> None: + self.__a = a + self.__b = b + self.__c = c + + def __call__(self, parameters: dict) -> Any: + x = parameters["x"] + return self.__a * x * x + self.__b * x + self.__c + + def true_gradient(self, parameters: dict) -> dict: + return {"x": 2.0 * self.__a * parameters["x"] + self.__b} + + +class Function2D: + def __init__(self, a: float, b: float, c: float) -> None: + self.__a = a + self.__b = b + self.__c = c + + def __call__(self, parameters: dict) -> Any: + # pylint: disable=invalid-name + x = parameters["x"] + y = parameters["y"] + return self.__a * x * x - self.__b * x * y + self.__c * y + + def true_gradient(self, parameters: dict) -> dict: + return { + "x": 2.0 * self.__a * parameters["x"] - self.__b * parameters["y"], + "y": -self.__b * parameters["x"] + self.__c, + } + + +# Now we just evaluate the gradient function at different positions x and +# compare with the expected values +@pytest.mark.parametrize( + "function, params_cases", + [ + ( + Function1D(a=2, b=3, c=5), + [{"x": x} for x in np.arange(-1.0, 1.0, 0.5)], + ) + ] + + [ + ( + Function1D(a=-4, b=1, c=2), + [{"x": x} for x in np.arange(-1.0, 1.0, 0.5)], + ) + ] + + [ + ( + Function1D(a=3, b=-2, c=-7), + [{"x": x} for x in np.arange(-1.0, 1.0, 0.5)], + ) + ] + + [ + ( + Function1D(a=3, b=-2, c=-7), + [{"x": x} for x in np.arange(-1.0, 1.0, 0.5)], + ) + ] + + [ + ( + Function2D(a=2, b=3, c=5), # type: ignore + [ + {"x": x, "y": y} + for x, y in product( + np.arange(-1.0, 1.0, 0.5), np.arange(-1.0, 1.0, 0.5) + ) + ], + ) + ] + + [ + ( + Function2D(a=-4, b=1, c=2), # type: ignore + [ + {"x": x, "y": y} + for x, y in product( + np.arange(-1.0, 1.0, 0.5), np.arange(-1.0, 1.0, 0.5) + ) + ], + ) + ] + + [ + ( + Function2D(a=3, b=-2, c=-7), # type: ignore + [ + {"x": x, "y": y} + for x, y in product( + np.arange(-1.0, 1.0, 0.5), np.arange(-1.0, 1.0, 0.5) + ) + ], + ) + ] + + [ + ( + Function2D(a=3, b=-2, c=-7), # type: ignore + [ + {"x": x, "y": y} + for x, y in product( + np.arange(-1.0, 1.0, 0.5), np.arange(-1.0, 1.0, 0.5) + ) + ], + ) + ], +) +def test_jax_gradient( + function: Callable[[Dict[str, float]], float], + params_cases: List[Dict[str, float]], +): + grad = gradient_creator(function, backend="jax") # type: ignore + for params in params_cases: + assert grad(params) == function.true_gradient(params) # type: ignore diff --git a/tests/optimizer/test_minuit.py b/tests/optimizer/test_minuit.py index 
3ec986a8..66df9a4f 100644 --- a/tests/optimizer/test_minuit.py +++ b/tests/optimizer/test_minuit.py @@ -1,30 +1,92 @@ -# pylint: disable=redefined-outer-name +from typing import Callable, Optional import pytest +from tensorwaves.interfaces import Estimator +from tensorwaves.optimizer.minuit import Minuit2 -class TestMinuit2: - @staticmethod - def test_optimize(fit_result: dict, free_parameters: dict): - result = fit_result - assert set(result) == { - "parameter_values", - "parameter_errors", - "log_likelihood", - "function_calls", - "execution_time", - } - par_values = result["parameter_values"] - par_errors = result["parameter_errors"] - assert set(par_values) == set(free_parameters) - assert pytest.approx(result["log_likelihood"]) == -12961.752837852626 - assert pytest.approx(par_values["Width_f(0)(500)"]) == 0.5546203338476 - assert pytest.approx(par_errors["Width_f(0)(500)"]) == 0.00981961384643 - assert ( - pytest.approx(par_values["Position_f(0)(980)"]) - == 0.9897319832688249 - ) - assert ( - pytest.approx(par_errors["Position_f(0)(980)"]) - == 0.0007052210121507604 - ) + +class Polynomial1DMinimaEstimator: + def __init__(self, polynomial: Callable) -> None: + self.__polynomial = polynomial + + def __call__(self, parameters: dict) -> float: + _x = parameters["x"] + return self.__polynomial(_x) + + @property + def parameters(self) -> dict: + return {"x": 0.0} + + +class Polynomial2DMinimaEstimator: + def __init__(self, polynomial: Callable) -> None: + self.__polynomial = polynomial + + def __call__(self, parameters: dict) -> float: + _x = parameters["x"] + _y = parameters["y"] + return self.__polynomial(_x, _y) + + @property + def parameters(self) -> dict: + return {"x": 0.0, "y": 0.0} + + +@pytest.mark.parametrize( + "estimator, initial_params, expected_result", + [ + ( + Polynomial1DMinimaEstimator(lambda x: x ** 2 - 1), + {"x": 0.5}, + {"x": 0.0}, + ), + ( + Polynomial1DMinimaEstimator(lambda x: x ** 2 - 1), + {"x": -0.5}, + {"x": 0.0}, + ), + ( + Polynomial1DMinimaEstimator(lambda x: (x - 1) ** 2 - 3 * x + 1), + {"x": -0.5}, + {"x": 2.5}, # 2 (x - 1) - 3 == 0 -> x = 3/2 + 1 + ), + ( + Polynomial1DMinimaEstimator( + lambda x: x ** 3 + (x - 1) ** 2 - 3 * x + 1 + ), + {"x": -1.0}, + {"x": 1.0}, + ), + ( + Polynomial1DMinimaEstimator( + lambda x: x ** 3 + (x - 1) ** 2 - 3 * x + 1 + ), + {"x": -2.0}, + None, # no convergence + ), + ( + Polynomial2DMinimaEstimator( + lambda x, y: (x - 1) ** 2 + (y + 1) ** 2 + ), + {"x": -2.0, "y": 4.0}, + {"x": 1.0, "y": -1.0}, + ), + ], +) +def test_minuit2( + estimator: Estimator, initial_params: dict, expected_result: Optional[dict] +): + minuit2 = Minuit2() + result = minuit2.optimize(estimator, initial_params) + + par_values = result["parameter_values"] + par_errors = result["parameter_errors"] + + if expected_result: + for par_name, value in expected_result.items(): + assert value == pytest.approx( + par_values[par_name], abs=3 * par_errors[par_name] + ) + else: + assert result["minimum_valid"] is False diff --git a/tests/optimizer/test_parameter_flattener.py b/tests/optimizer/test_parameter_flattener.py new file mode 100644 index 00000000..da87e45a --- /dev/null +++ b/tests/optimizer/test_parameter_flattener.py @@ -0,0 +1,54 @@ +# pylint: disable=redefined-outer-name + +import pytest + +from tensorwaves.optimizer.minuit import ParameterFlattener + + +@pytest.fixture(scope="module") +def parameter_flattener(): + return ParameterFlattener({"var1": 1 + 0j, "var2": 2}) + + +@pytest.mark.parametrize( + "unflattened_parameters, 
expected_flattened_parameters", + [ + ( + {"var1": 0.5 + 2j, "var2": -1.2}, + {"real_var1": 0.5, "imag_var1": 2.0, "var2": -1.2}, + ), + ( + {"var1": 0.5 - 6.4j, "var2": -1.2}, + {"real_var1": 0.5, "imag_var1": -6.4, "var2": -1.2}, + ), + ], +) +def test_parameter_flattening( + parameter_flattener, unflattened_parameters, expected_flattened_parameters +): + assert ( + parameter_flattener.flatten(unflattened_parameters) + == expected_flattened_parameters + ) + + +@pytest.mark.parametrize( + "flattened_parameters, expected_unflattened_parameters", + [ + ( + {"real_var1": 0.5, "imag_var1": 2.0, "var2": -1.2}, + {"var1": 0.5 + 2j, "var2": -1.2}, + ), + ( + {"real_var1": 0.5, "imag_var1": -6.4, "var2": -1.2}, + {"var1": 0.5 - 6.4j, "var2": -1.2}, + ), + ], +) +def test_parameter_unflatten( + parameter_flattener, flattened_parameters, expected_unflattened_parameters +): + assert ( + parameter_flattener.unflatten(flattened_parameters) + == expected_unflattened_parameters + ) diff --git a/tests/physics/helicity_formalism/test_amplitude.py b/tests/physics/helicity_formalism/test_amplitude.py deleted file mode 100644 index 42209f57..00000000 --- a/tests/physics/helicity_formalism/test_amplitude.py +++ /dev/null @@ -1,160 +0,0 @@ -import expertsystem.amplitude.model as es -import numpy as np -import pytest # type: ignore -import tensorflow as tf - -from tensorwaves.physics.helicity_formalism.amplitude import ( - IntensityBuilder, - _CoefficientAmplitude, - _CoherentIntensity, - _create_dynamics, - _IncoherentIntensity, - _SequentialAmplitude, -) -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, -) - - -def linear_func(input_value): - return input_value - - -@pytest.mark.parametrize( - "functions, test_data, expected_results", - [ - ( - [linear_func], - [1.0, 2.0, 3.0, 4.2, 0.2], - [1.0, 2.0, 3.0, 4.2, 0.2], - ), - ( - [linear_func, linear_func], - [1.0, 2.0, 3.0, 4.2, 0.2], - [2.0 * x for x in [1.0, 2.0, 3.0, 4.2, 0.2]], - ), - ], -) -def test_incoherent_intensity(functions, test_data, expected_results): - model = _IncoherentIntensity(functions) - results = model(test_data).numpy() - np.testing.assert_array_almost_equal(results, expected_results, decimal=6) - - -@pytest.mark.parametrize( - "functions, test_data, expected_results", - [ - ( - [linear_func], - [(1.0 + 2.0j), (1.5 - 1.4j), (0.12 + 20.0j)], - [5.0, 4.21, 400.0144], - ), - ( - [linear_func, linear_func], - [(1.0 + 2.0j), (1.5 - 1.4j), (-0.23 + 3.2j)], - [20.0, 16.84, 41.1716], - ), - ], -) -def test_coherent_intensity(functions, test_data, expected_results): - model = _CoherentIntensity(functions) - results = model(test_data).numpy() - np.testing.assert_array_almost_equal(results, expected_results, decimal=6) - - -@pytest.mark.parametrize( - "function, mag, phase, test_data, expected_results", - [ - ( - linear_func, - 2.0, - 0.0, - [(1.0 + 2.0j), (1.5 - 1.4j), (0.12 + 20.0j)], - [(2.0 + 4.0j), (3.0 - 2.8j), (0.24 + 40.0j)], - ), - ( - linear_func, - 3.0, - 0.5 * np.pi, - [(1.0 + 2.0j), (1.5 - 1.4j), (-0.23 + 3.2j)], - [(-6.0 + 3.0j), (4.2 + 4.5j), (-9.6 - 0.69j)], - ), - ], -) -def test_coefficient_amplitude( - function, mag, phase, test_data, expected_results -): - model = _CoefficientAmplitude( - function, - tf.constant(mag, dtype=tf.float64), - tf.constant(phase, dtype=tf.float64), - ) - results = model(test_data).numpy() - np.testing.assert_array_almost_equal(results, expected_results, decimal=6) - - -@pytest.mark.parametrize( - "functions, test_data, expected_results", - [ - ( - [linear_func], - 
[(1.0 + 2.0j), (1.5 - 1.4j), (0.12 + 20.0j)], - [(1.0 + 2.0j), (1.5 - 1.4j), (0.12 + 20.0j)], - ), - ( - [linear_func, linear_func], - [(1.0 + 2.0j), (1.5 - 1.4j), (-0.23 + 3.2j)], - [(-3.0 + 4.0j), (0.29 - 4.2j), (-10.1871 - 1.472j)], - ), - ], -) -def test_sequential_amplitude(functions, test_data, expected_results): - model = _SequentialAmplitude(functions) - results = model(test_data).numpy() - np.testing.assert_array_almost_equal(results, expected_results, decimal=6) - - -@pytest.mark.parametrize( - "decaying_particle_name, valid", - [ - ("p", False), - ("pi0", True), - ], -) -def test_invalid_angular_momentum_error(decaying_particle_name, valid, pdg): - kinematics = HelicityKinematics(None) # type: ignore - builder = IntensityBuilder(pdg, kinematics) - # pylint: disable=protected-access - builder._dynamics = es.ParticleDynamics(pdg, parameters=es.FitParameters()) - builder._dynamics.set_breit_wigner(decaying_particle_name) - dec_prod_fs_ids = [[0], [1]] - decaying_particle = es.HelicityParticle( - particle=pdg.find(decaying_particle_name), helicity=0 - ) - inv_mass_name = "foo" - - amplitude_node = es.HelicityDecay( - decaying_particle=decaying_particle, decay_products=[] - ) - - if not valid: - with pytest.raises( - ValueError, match=r".*Model invalid.*angular momentum.*" - ): - _create_dynamics( - builder=builder, - amplitude_node=amplitude_node, - dec_prod_fs_ids=dec_prod_fs_ids, - decaying_state=decaying_particle, - inv_mass_name=inv_mass_name, - kinematics=kinematics, - ) - else: - _create_dynamics( - builder=builder, - amplitude_node=amplitude_node, - dec_prod_fs_ids=dec_prod_fs_ids, - decaying_state=decaying_particle, - inv_mass_name=inv_mass_name, - kinematics=kinematics, - ) diff --git a/tests/physics/helicity_formalism/test_angular_distributions.py b/tests/physics/helicity_formalism/test_angular_distributions.py deleted file mode 100644 index e1889ff1..00000000 --- a/tests/physics/helicity_formalism/test_angular_distributions.py +++ /dev/null @@ -1,543 +0,0 @@ -# cspell:ignore asarray dphi epem histogramdd isclass nquad scipy yerr ylabel -# cspell:ignore ylim -# pylint: disable=import-outside-toplevel,no-self-use,redefined-outer-name - -import os -from abc import ABC, abstractmethod -from functools import reduce -from math import cos, sqrt -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple - -import expertsystem as es -import numpy as np -import pytest -import scipy.integrate as integrate -import sympy -from matplotlib import pyplot as plt -from sympy.abc import symbols -from sympy.physics.quantum.spin import WignerD - -from tensorwaves.data.generate import ( - TFUniformRealNumberGenerator, - generate_data, -) -from tensorwaves.physics.helicity_formalism.amplitude import IntensityBuilder -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, -) - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - - -class AngularDistributionTest(ABC): - @staticmethod - @abstractmethod - def calc_distributions() -> List[Tuple[str, Any]]: - pass - - -class TestEpemToDmD0Pip(AngularDistributionTest): - # Use this function to reproduce the model file. - # Note the normalization part has been removed! 
- @pytest.fixture(scope="module") - def amplitude_model(self) -> es.amplitude.model.AmplitudeModel: - epem = es.particle.Particle( - name="EpEm", - pid=12345678, - mass=4.36, - spin=1.0, - parity=es.particle.Parity(-1), - c_parity=es.particle.Parity(-1), - ) - particles = es.io.load_pdg() - particles.add(epem) - - result = es.generate_transitions( - initial_state=[("EpEm", [-1])], - final_state=[("D0", [0]), ("D-", [0]), ("pi+", [0])], - allowed_intermediate_particles=["D(2)*(2460)+"], - allowed_interaction_types="em", - particles=particles, - ) - - model = es.generate_amplitudes(result) - model.dynamics.set_non_dynamic("D(2)*(2460)+") - model.dynamics["D(2)*(2460)+"].form_factor = None # type: ignore - model.dynamics["EpEm"].form_factor = None # type: ignore - es.io.write(model, f"{SCRIPT_DIR}/{TestEpemToDmD0Pip.__name__}.yml") - return model - - # Use this function to reproduce the theoretical predictions. - @staticmethod - def calc_distributions() -> List[Tuple[str, Any]]: - theta1, phi1, theta2, phi2, dphi = symbols( - "theta1,phi1,theta2,phi2,dphi", real=True - ) - - amp = ( - WignerD(1, -1, 1, -phi1, theta1, phi1) - * WignerD(2, 1, 0, -phi2, theta2, phi2) - - 1 - * WignerD(1, -1, -1, -phi1, theta1, phi1) - * WignerD(2, -1, 0, -phi2, theta2, phi2) - ).doit() - - intensity = sympy.simplify( - (amp * sympy.conjugate(amp)).expand(complex=True) - ) - intensity = sympy.simplify(intensity.replace(phi2, dphi + phi1)) - assert sympy.im(intensity) == 0 - - all_variables = [theta1, phi1, theta2, dphi] - return [ - ( - f"{var.name} dependency:", - calculate_sympy_integral( - intensity, - all_variables[0:i] + all_variables[i + 1 :], - ), - ) - for i, var in enumerate(all_variables) - ] - - @pytest.fixture(scope="module") - def intensity_dataset( - self, amplitude_model: es.amplitude.model.AmplitudeModel - ) -> np.ndarray: - return generate_dataset(amplitude_model, events=50000) - - @pytest.mark.parametrize( - "angular_variable, expected_distribution_function", # type: ignore - [ - ( # x = cos(theta) distribution from epem decay - "theta_3+4_2", - lambda x: 1 + x * x, - ), - ( # phi distribution of the epem decay - "phi_3+4_2", - lambda x: 1, - ), - ( # x = cos(theta') distribution from D2* - "theta_3_4_vs_2", - lambda x: 1 - (2 * x * x - 1) ** 2, - ), - ( # phi' distribution of the D2* decay - "phi_3_4_vs_2", - lambda phi: 2 + cos(2 * phi), - ), - # ( # 2d distribution of the D2* decay - # ['theta_3_4_vs_2', 'phi_3_4_vs_2'], - # lambda x, phi: (1 - x**2) * (x**2) * (2 + cos(2 * phi)), - # ) - ], # type: ignore - ) - def test_distributions_reduced_chi2( - self, - angular_variable: str, - expected_distribution_function: Callable, - intensity_dataset, - ) -> None: - - verify_angular_distribution( - intensity_dataset, - angular_variable, - expected_distribution_function, - chisquare_test, - bins=180, - make_plots=True, - ) - - -class TestD1ToD0PiPi(AngularDistributionTest): - # Use this function to reproduce the model file. - # Note the normalization part has been removed! 
- @pytest.fixture(scope="session") - def amplitude_model(self) -> es.amplitude.model.AmplitudeModel: - result = es.generate_transitions( - initial_state=[("D(1)(2420)0", [-1])], - final_state=[("D0", [0]), ("pi-", [0]), ("pi+", [0])], - allowed_intermediate_particles=["D*"], - allowed_interaction_types="strong", - ) - model = es.generate_amplitudes(result) - - model.dynamics.set_non_dynamic("D*(2010)+") - model.dynamics["D(1)(2420)0"].form_factor = None # type: ignore - model.dynamics["D*(2010)+"].form_factor = None # type: ignore - model.parameters[ - "Magnitude_D(1)(2420)0_to_D*(2010)+_0+pi-_0;D*(2010)+_to_D0_0+pi+_0;" - ].value = 0.5 - - es.io.write(model, f"{SCRIPT_DIR}/{TestD1ToD0PiPi.__name__}.yml") - return model - - # Use this function to reproduce the theoretical predictions. - @staticmethod - def calc_distributions() -> List[Tuple[str, Any]]: - theta1, phi1, theta2, phi2 = symbols( - "theta1,phi1,theta2,phi2", real=True - ) - - # The phi1 dependency vanishes completely, hence phi2 can be seen as the - # difference between the two phi angles. - amp = ( - WignerD(1, -1, -1, -phi1, theta1, 0) - * WignerD(1, -1, 0, -phi2, theta2, 0) - + 0.5 - * WignerD(1, -1, 0, -phi1, theta1, 0) - * WignerD(1, 0, 0, -phi2, theta2, 0) - + WignerD(1, -1, 1, -phi1, theta1, 0) - * WignerD(1, 1, 0, -phi2, theta2, 0) - ).doit() - - intensity = sympy.simplify( - (amp * sympy.conjugate(amp)).expand(complex=True) - ) - - assert sympy.im(intensity) == 0 - - all_variables = [theta1, phi1, theta2, phi2] - return [ - ( - f"{var.name} dependency:", - calculate_sympy_integral( - intensity, - all_variables[0:i] + all_variables[i + 1 :], - ), - ) - for i, var in enumerate(all_variables) - ] - - @pytest.fixture(scope="module") - def intensity_dataset( - self, amplitude_model: es.amplitude.model.AmplitudeModel - ) -> np.ndarray: - return generate_dataset(amplitude_model, events=30000) - - @pytest.mark.parametrize( - "angular_variable, expected_distribution_function", # type: ignore - [ - ( # x = cos(theta) distribution from D1 decay - "theta_3+4_2", - lambda x: 1.25 + 0.75 * x * x, - ), - ( # x = cos(theta') distribution from D* - "theta_3_4_vs_2", - lambda x: 1 - 0.75 * x * x, - ), - ( # phi distribution of the D* decay - "phi_3_4_vs_2", - lambda x: 1 - 1 / 2.25 * cos(2 * x), - ), - ], # type: ignore - ) - def test_distributions_reduced_chi2( - self, - angular_variable: str, - expected_distribution_function: Callable, - intensity_dataset, - ) -> None: - - verify_angular_distribution( - intensity_dataset, - angular_variable, - expected_distribution_function, - chisquare_test, - bins=120, - make_plots=False, - ) - - @pytest.mark.parametrize( - "angular_variable, expected_distribution_function", # type: ignore - [ - ( # x = cos(theta) distribution from D1 decay - "theta_3+4_2", - lambda x: 1.25 + 0.75 * x * x, - ), - ( # x = cos(theta') distribution from D* - "theta_3_4_vs_2", - lambda x: 1 - 0.75 * x * x, - ), - ( # phi distribution of the D* decay - "phi_3_4_vs_2", - lambda x: 1 - 1 / 2.25 * cos(2 * x), - ), - ], # type: ignore - ) - def test_distributions_residuals( - self, - angular_variable: str, - expected_distribution_function: Callable, - intensity_dataset, - ) -> None: - - verify_angular_distribution( - intensity_dataset, - angular_variable, - expected_distribution_function, - residual_test, - bins=120, - make_plots=False, - ) - - -class Histogram: - def __init__( - self, - var_name: str, - bin_edges: Sequence[Sequence[float]], - bin_contents: Sequence[float], - bin_errors: Sequence[float], - **mpl_kwargs: Any, 
- ) -> None: - self.var_name = var_name - self.bin_edges = bin_edges - self.bin_contents = bin_contents - self.bin_errors = bin_errors - self.mpl_kwargs = mpl_kwargs - - -def chisquare_test(histogram: Histogram, func: Callable) -> None: - def __chisquare( - values: Sequence[float], - errors: Sequence[float], - expected: Sequence[float], - ) -> float: - return np.sum( - [ - ((x[0] - x[1]) / x[2]) ** 2 - for x in zip(values, expected, errors) - ] - ) - - function_hist = __function_to_histogram(func, histogram) - function_hist = __scale_to_other_histogram(function_hist, histogram) - degrees_of_freedom = ( - reduce((lambda x, y: x * y), np.asarray(histogram.bin_contents).shape) - - 1 - ) - - redchi2 = ( - __chisquare( - histogram.bin_contents, - histogram.bin_errors, - function_hist.bin_contents, - ) - / degrees_of_freedom - ) - error = sqrt( - 2 / degrees_of_freedom - ) # accurate for large degrees of freedom and gaussian errors - - assert abs(redchi2 - 1.0) < 3.0 * error - - -def residual_test(histogram: Histogram, func: Callable) -> None: - function_hist = __function_to_histogram(func, histogram) - function_hist = __scale_to_other_histogram(function_hist, histogram) - - residuals = [ - (x[0] - x[1]) / x[2] - for x in zip( - histogram.bin_contents, - function_hist.bin_contents, - histogram.bin_errors, - ) - ] - _n = len(histogram.bin_contents) - mean_error = sqrt(_n) - sample_variance = np.sum(np.square(residuals)) / (_n - 1) - sample_std_dev_error = sqrt( - sample_variance / (2.0 * (_n - 1)) - ) # only true for gaussian distribution - assert abs(np.mean(residuals)) < mean_error - assert abs(sqrt(sample_variance) - 1.0) < 3.0 * sample_std_dev_error - - -def __function_to_histogram(func: Callable, histogram: Histogram) -> Histogram: - bin_edges = histogram.bin_edges - - def __integrate_within_bins( - func: Callable, integration_ranges: Sequence[Tuple[float, float]] - ) -> Tuple[Sequence[float], Sequence[float]]: - results = [integrate.nquad(func, [x]) for x in integration_ranges] - return ([x[0] for x in results], [x[1] for x in results]) - - integrals, errors = __integrate_within_bins( - func, list(zip(bin_edges[0][:-1], bin_edges[0][1:])) - ) - return Histogram( - histogram.var_name, - bin_edges, - integrals, - errors, - ) - - -def __scale_to_other_histogram( - histogram: Histogram, histogram_reference: Histogram -) -> Histogram: - normalization = np.sum(histogram_reference.bin_contents) / np.sum( - histogram.bin_contents - ) - - new_bin_contents = np.multiply(normalization, histogram.bin_contents) - new_bin_errors = [np.sqrt(normalization) * x for x in histogram.bin_errors] - return Histogram( - histogram.var_name, - histogram.bin_edges, - new_bin_contents, - bin_errors=new_bin_errors, - ) - - -def generate_dataset( - model: es.amplitude.model.AmplitudeModel, events: int -) -> np.ndarray: - kinematics = HelicityKinematics.from_model(model) - part_list = model.particles - - builder = IntensityBuilder(part_list, kinematics) - intensity = builder.create_intensity(model) - - rng = TFUniformRealNumberGenerator(seed=0) - sample = generate_data(events, kinematics, intensity, random_generator=rng) - - return kinematics.convert(sample) - - -def verify_angular_distribution( - dataset: np.ndarray, - variable_name: str, - expected_distribution_function: Callable, - test_function: Callable[[Histogram, Callable], None], - bins: int = 120, - make_plots: bool = False, -) -> None: - def __to_cosine( - datarecord: np.ndarray, column_name: str - ) -> Tuple[np.array, str]: - return ( - [cos(x) for x in 
datarecord[column_name]], - "cos" + column_name, - ) - - def __make_histogram( - var_name: str, - values: np.array, - weights: Optional[np.array] = None, - bins: int = 50, - **kwargs: Any, - ) -> Histogram: - bin_content, bin_edges = np.histogramdd( - values, bins=bins, weights=weights - ) - if len(bin_content.shape) == 1: - errs = [np.sqrt(x) if x > 0 else 1 for x in bin_content] - elif len(bin_content.shape) == 2: - errs = [ - [np.sqrt(x) if x > 0 else 1 for x in row] - for row in bin_content - ] - return Histogram(var_name, bin_edges, bin_content, errs, **kwargs) - - def __plot_distributions_1d( - histograms: Dict[str, Histogram], - use_bin_centers: bool = True, - **kwargs: Any, - ) -> None: - plt.clf() - var_name = "" - for name, histogram in histograms.items(): - bincenters = histogram.bin_edges - if use_bin_centers: - bincenters = 0.5 * ( - np.array(histogram.bin_edges[0][1:]) - + np.array(histogram.bin_edges[0][:-1]) - ) - plt.errorbar( - bincenters, - histogram.bin_contents, - yerr=histogram.bin_errors, - label=name, - **(histogram.mpl_kwargs), - ) - if var_name == "": - var_name = histogram.var_name - - if plt.ylim()[0] > 0.0: - plt.ylim(bottom=0.0) - axis = plt.gca() - if "x_title" in kwargs: - axis.set_xlabel(kwargs["x_title"]) - else: - axis.set_xlabel(var_name) - axis.set_ylabel("") - axis.legend() - plt.tight_layout() - plt.savefig(var_name + ".png", bbox_inches="tight") - - if "theta" in variable_name and "cos" not in variable_name: - var_data, var_name = __to_cosine(dataset, variable_name) - else: - var_data = dataset[variable_name] - var_name = variable_name - - data_hist = __make_histogram( - var_name, - var_data, - bins=bins, - fmt="o", - ) - - if make_plots: - function_hist = __function_to_histogram( - expected_distribution_function, data_hist - ) - function_hist = __scale_to_other_histogram(function_hist, data_hist) - function_hist.mpl_kwargs = {"fmt": "-"} - - hist_bundle = {"data": data_hist, "theory": function_hist} - __plot_distributions_1d(hist_bundle, x_title=var_name) - - test_function(data_hist, expected_distribution_function) - - -def calculate_sympy_integral( - intensity: Any, - integration_variables: List[Any], - jacobi_determinant: Optional[Any] = None, -) -> Any: - if jacobi_determinant is None: - for int_var in integration_variables: - if "theta" in int_var.name: - intensity *= sympy.sin(int_var) - else: - intensity *= jacobi_determinant - return sympy.trigsimp( - sympy.re( - sympy.integrate( - intensity, - *( - (x, -sympy.pi, sympy.pi) - if "phi" in x.name - else (x, 0, sympy.pi) - for x in integration_variables - ), - ) - ) - ).doit() - - -def calc_distributions(): - import inspect - import sys - - for _, obj in inspect.getmembers(sys.modules[__name__]): - if inspect.isclass(obj) and issubclass(obj, AngularDistributionTest): - distributions = obj.calc_distributions() - print(distributions) - - -if __name__ == "__main__": - calc_distributions() diff --git a/tests/physics/helicity_formalism/test_canonical.py b/tests/physics/helicity_formalism/test_canonical.py deleted file mode 100644 index e4a564e8..00000000 --- a/tests/physics/helicity_formalism/test_canonical.py +++ /dev/null @@ -1,147 +0,0 @@ -import math - -import expertsystem.amplitude.model as es -import pytest # type: ignore - -from tensorwaves.physics.helicity_formalism.amplitude import ( - _clebsch_gordan_coefficient, - _determine_canonical_prefactor, -) - - -@pytest.mark.parametrize( - "clebsch_gordan, expected_value", - [ - ( - es.ClebschGordan( - J=1.0, - M=1.0, - j_1=0.5, - m_1=0.5, - 
j_2=0.5, - m_2=0.5, - ), - 1, - ), - ( - es.ClebschGordan( - J=1.0, - M=0.0, - j_1=0.5, - m_1=0.5, - j_2=0.5, - m_2=-0.5, - ), - math.sqrt(1 / 2), - ), - ( - es.ClebschGordan( - J=1.0, - M=0.0, - j_1=0.5, - m_1=-0.5, - j_2=0.5, - m_2=0.5, - ), - math.sqrt(1 / 2), - ), - ( - es.ClebschGordan( - J=0.0, - M=0.0, - j_1=0.5, - m_1=-0.5, - j_2=0.5, - m_2=0.5, - ), - -math.sqrt(1 / 2), - ), - ( - es.ClebschGordan( - J=0.0, - M=0.0, - j_1=0.5, - m_1=0.5, - j_2=0.5, - m_2=-0.5, - ), - math.sqrt(1 / 2), - ), - ( - es.ClebschGordan( - J=3.0, - M=3.0, - j_1=2.0, - m_1=2.0, - j_2=1.0, - m_2=1.0, - ), - 1, - ), - ( - es.ClebschGordan( - J=3.0, - M=2.0, - j_1=2.0, - m_1=2.0, - j_2=1.0, - m_2=0.0, - ), - math.sqrt(1 / 3), - ), - ( - es.ClebschGordan( - J=1.0, - M=1.0, - j_1=2.0, - m_1=0.0, - j_2=1.0, - m_2=1.0, - ), - math.sqrt(1 / 10), - ), - ], -) -def test_clebsch_gordan_coefficient( - clebsch_gordan: es.ClebschGordan, expected_value: float -): - cgc = _clebsch_gordan_coefficient(clebsch_gordan) - assert cgc == pytest.approx(expected_value, rel=1e-6) - - -@pytest.mark.parametrize( - "cano_decay, expected_value", - [ - ( - es.CanonicalDecay( - decaying_particle=None, # type: ignore - decay_products=None, # type: ignore - l_s=es.ClebschGordan( - J=1.0, M=1.0, j_1=2.0, m_1=0.0, j_2=1.0, m_2=1.0 - ), - s2s3=es.ClebschGordan( - J=1.0, M=1.0, j_1=0.0, m_1=0.0, j_2=1.0, m_2=1.0 - ), - ), - math.sqrt(1 / 10) * 1, - ), - ( - es.CanonicalDecay( - decaying_particle=None, # type: ignore - decay_products=None, # type: ignore - l_s=es.ClebschGordan( - J=1.0, M=1.0, j_1=2.0, m_1=0.0, j_2=1.0, m_2=1.0 - ), - s2s3=es.ClebschGordan( - J=1.0, M=1.0, j_1=1.0, m_1=0.0, j_2=1.0, m_2=1.0 - ), - ), - math.sqrt(1 / 10) * -math.sqrt(1 / 2), - ), - ], -) -def test_determine_canonical_prefactor( - cano_decay: es.CanonicalDecay, expected_value: float -): - prefactor = _determine_canonical_prefactor(cano_decay) - assert prefactor == pytest.approx(expected_value, rel=1e-6) diff --git a/tests/physics/helicity_formalism/test_helicity_angles.py b/tests/physics/helicity_formalism/test_helicity_angles.py deleted file mode 100644 index 9aca8254..00000000 --- a/tests/physics/helicity_formalism/test_helicity_angles.py +++ /dev/null @@ -1,155 +0,0 @@ -import expertsystem.amplitude.model as es -import numpy as np -import pytest - -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, - SubSystem, -) - -TEST_DATA = { - "events": { - 2: [ - (0.514208, -0.184219, 1.23296, 1.35527), - (0.0727385, -0.0528868, 0.826163, 0.841933), - (-0.162529, 0.29976, -0.411133, 0.550927), - (0.0486171, 0.151922, 0.370309, 0.425195), - (-0.0555915, -0.100214, -0.0597338, 0.186869), - (0.238921, 0.266712, -1.20442, 1.26375), - (0.450724, -0.439515, -0.360076, 0.737698), - (0.552298, 0.440006, 0.644927, 0.965809), - (-0.248155, -0.158587, -0.229673, 0.397113), - (1.33491, 0.358535, 0.0457548, 1.38955), - ], - 3: [ - (-0.305812, 0.284, -0.630057, 0.755744), - (0.784483, 0.614347, -0.255334, 1.02861), - (-0.20767, 0.272796, 0.0990739, 0.356875), - (0.404557, 0.510467, -0.276426, 0.70757), - (0.47713, 0.284575, -0.775431, 0.953902), - (-0.204775, -0.0197981, 0.0799868, 0.220732), - (0.00590727, 0.709346, -0.190877, 0.734602), - (0.329157, -0.431973, 0.272873, 0.607787), - (-0.201436, -0.534829, 0.256253, 0.626325), - (-0.196357, 0.00211926, -0.33282, 0.386432), - ], - 4: [ - (-0.061663, -0.0211864, 0.144596, 0.208274), - (-0.243319, -0.283044, -0.234866, 0.461193), - (0.82872, -0.0465425, -0.599834, 1.03294), - (0.263003, -0.089236, 
0.686187, 0.752466), - (0.656892, -0.107848, 0.309898, 0.746588), - (0.521569, -0.0448683, 0.43283, 0.692537), - (-0.517582, -0.676002, -0.0734335, 0.865147), - (-0.975278, -0.0207817, -0.934467, 1.35759), - (-0.41665, 0.237646, 0.691269, 0.852141), - (-0.464203, -0.358114, 0.13307, 0.616162), - ], - 5: [ - (-0.146733, -0.0785946, -0.747499, 0.777613), - (-0.613903, -0.278416, -0.335962, 0.765168), - (-0.458522, -0.526014, 0.911894, 1.15616), - (-0.716177, -0.573154, -0.780069, 1.21167), - (-1.07843, -0.0765127, 0.525267, 1.20954), - (-0.555715, -0.202046, 0.691605, 0.919879), - (0.0609506, 0.406171, 0.624387, 0.759452), - (0.0938229, 0.012748, 0.0166676, 0.165716), - (0.866241, 0.455769, -0.717849, 1.22132), - (-0.674348, -0.0025409, 0.153994, 0.704759), - ], - }, - "angles": { - (((3, 4, 5), (2,)), (), ()): [ - (-0.914298, 2.79758), - (-0.994127, 2.51292), - (0.769715, -1.07396), - (-0.918418, -1.88051), - (0.462214, 1.06433), - (0.958535, -2.30129), - (0.496489, 2.36878), - (-0.674376, -2.46888), - (0.614968, 0.568649), - (-0.0330843, -2.8792), - ], - (((4, 5), (3,)), (2,), ()): [ - (-0.772533, 1.04362), - (0.163659, 1.87349), - (0.556365, 0.160733), - (0.133251, -2.81088), - (-0.0264361, 2.84379), - (0.227188, 2.29128), - (-0.166924, 2.24539), - (0.652761, -1.20272), - (0.443122, 0.615838), - (0.503577, 2.98067), - ], - (((4,), (5,)), (3,), (2,)): [ - (0.460324, -2.77203), - (-0.410464, 1.45339), - (0.248566, -2.51096), - (-0.301959, 2.71085), - (-0.522502, -1.12706), - (0.787267, -3.01323), - (0.488066, 2.07305), - (0.954167, 0.502648), - (-0.553114, -1.23689), - (0.00256349, 1.7605), - ], - }, -} - - -@pytest.mark.parametrize( - "test_events, expected_angles", - [(TEST_DATA["events"], TEST_DATA["angles"])], -) # pylint: disable=too-many-locals -def test_helicity_angles_correctness(test_events, expected_angles, pdg): - kinematics = es.Kinematics( - initial_state={0: pdg["J/psi(1S)"]}, - final_state={ - 2: pdg["pi0"], - 3: pdg["gamma"], - 4: pdg["pi0"], - 5: pdg["pi0"], - }, - ) - model = es.AmplitudeModel( - particles=pdg, - kinematics=kinematics, - parameters=None, # type: ignore - intensity=None, # type: ignore - dynamics=None, # type: ignore - ) - kin = HelicityKinematics.from_model(model) - # raise ValueError(kin.reaction_kinematics_info.fs_id_event_pos_mapping) - subsys_angle_names = {} - for subsys in expected_angles.keys(): - temp_names = kin.register_subsystem(SubSystem(*subsys)) - subsys_angle_names.update({subsys: [temp_names[1], temp_names[2]]}) - - data = np.array(tuple(np.array(v) for v in test_events.values())) - kinematic_vars = kin.convert(data) - - assert len(kinematic_vars) == 3 * len(expected_angles.keys()) - number_of_events = len(data[0]) - for subsys, angle_names in subsys_angle_names.items(): - for name in angle_names: - assert len(kinematic_vars[name]) == number_of_events - - expected_values = np.array(np.array(expected_angles[subsys]).T) - # test cos(theta) - np.testing.assert_array_almost_equal( - np.cos(kinematic_vars[angle_names[0]]), expected_values[0], 1e-6 - ) - # test phi - if subsys == (((4,), (5,)), (3,), (2,)): - for kin_var, expected in zip( - kinematic_vars[angle_names[1]], expected_values[1] - ): - assert round(kin_var, 4) == round( - expected - np.pi, 4 - ) or round(kin_var, 4) == round(expected + np.pi, 4) - else: - np.testing.assert_array_almost_equal( - kinematic_vars[angle_names[1]], expected_values[1], 1e-6 - ) diff --git a/tests/physics/helicity_formalism/test_kinematics.py b/tests/physics/helicity_formalism/test_kinematics.py index 
28afb9ce..241a8466 100644 --- a/tests/physics/helicity_formalism/test_kinematics.py +++ b/tests/physics/helicity_formalism/test_kinematics.py @@ -2,22 +2,23 @@ import numpy as np import pandas as pd -from expertsystem.amplitude.model import AmplitudeModel class TestHelicityKinematics: def test_convert( self, - helicity_model: AmplitudeModel, data_sample: np.ndarray, data_set: dict, ): + # The helicity_model argument was removed until the kinematics has + # been ported to the expertsystem, at which point the kinematics info + # can be extracted from the model again assert set(data_set) == { - "mSq_2", - "mSq_2+3+4", - "mSq_3", - "mSq_3+4", - "mSq_4", + "m_2", + "m_2+3+4", + "m_3", + "m_3+4", + "m_4", "phi_3+4_2", "phi_3_4_vs_2", "theta_3+4_2", @@ -25,11 +26,12 @@ } _, sample_size, _ = data_sample.shape assert sample_size == 10000 - final_state = helicity_model.kinematics.final_state + + # These masses are also hardcoded until the kinematics is ported to the expertsystem float_only_variables = { - "mSq_2": final_state[2].mass ** 2, - "mSq_3": final_state[3].mass ** 2, - "mSq_4": final_state[4].mass ** 2, + "m_2": 0.0, + "m_3": 0.1349768, + "m_4": 0.1349768, } for var_name, value in data_set.items(): if var_name in float_only_variables: diff --git a/tests/physics/test_amplitude.py b/tests/physics/test_amplitude.py new file mode 100644 index 00000000..7df5d882 --- /dev/null +++ b/tests/physics/test_amplitude.py @@ -0,0 +1,68 @@ +# pylint: disable=redefined-outer-name + +import numpy as np +import pytest +import sympy as sp + +from tensorwaves.physics.amplitude import SympyModel + + +@pytest.fixture(scope="module") +def function() -> SympyModel: + c_1, c_2, c_3, c_4 = sp.symbols("c_1,c_2,c_3,c_4") + x = sp.Symbol("x", real=True) + params = { + c_1: 1 + 1j, + c_2: -1 + 1j, + c_3: 1 - 1j, + c_4: -1 - 1j, + } + expression = ( + c_1 * sp.sqrt(x) / x + + c_2 * sp.exp(-sp.Rational(1, 2) * ((x - 2) / sp.Rational(1, 2)) ** 2) + + c_3 * (x ** 2 - 3 * x) + + c_4 + ) + expression = expression.subs(params) + expression = sp.simplify((sp.conjugate(expression) * expression)) + return SympyModel(expression=expression, parameters=params) + + +@pytest.mark.parametrize( + "test_data, expected_results", + [ + ( + {"x": np.array([0.5, 1.0, 1.5, 2.0, 2.5])}, + [3.52394, 9.11931, 16.3869, 18.1716, 7.16359], + ), + ], +) +def test_complex_amplitude(function, test_data, expected_results): + results = function(test_data) + np.testing.assert_array_almost_equal(results, expected_results, decimal=4) + + +def test_helicity(helicity_model: SympyModel): + assert set(helicity_model.parameters) == { + "C[J/\\psi(1S) \\to f_{0}(980)_{0} \\gamma_{+1};f_{0}(980) \\to \\pi^{0}_{0} \\pi^{0}_{0}]", + "C[J/\\psi(1S) \\to f_{0}(500)_{0} \\gamma_{+1};f_{0}(500) \\to \\pi^{0}_{0} \\pi^{0}_{0}]", + "m_f(0)(980)", + "d_f(0)(980)", + "Gamma_f(0)(980)", + "m_f(0)(500)", + "d_f(0)(500)", + "Gamma_f(0)(500)", + } + + +def test_canonical(canonical_model: SympyModel): + assert set(canonical_model.parameters) == { + "C[J/\\psi(1S) \\to f_{0}(980)_{0} \\gamma_{+1};f_{0}(980) \\to \\pi^{0}_{0} \\pi^{0}_{0}]", + "C[J/\\psi(1S) \\to f_{0}(500)_{0} \\gamma_{+1};f_{0}(500) \\to \\pi^{0}_{0} \\pi^{0}_{0}]", + "m_f(0)(980)", + "d_f(0)(980)", + "Gamma_f(0)(980)", + "m_f(0)(500)", + "d_f(0)(500)", + "Gamma_f(0)(500)", + } diff --git a/tests/recipe/__init__.py b/tests/recipe/__init__.py deleted file mode 100644 index 948df262..00000000 --- a/tests/recipe/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Required to set mypy options for the tests folder.""" diff --git 
a/tests/recipe/test_amplitude_creation.py b/tests/recipe/test_amplitude_creation.py deleted file mode 100644 index c63d5628..00000000 --- a/tests/recipe/test_amplitude_creation.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -from copy import deepcopy - -import expertsystem.amplitude.model as es - -from tensorwaves.data.generate import generate_phsp -from tensorwaves.physics.helicity_formalism.amplitude import IntensityBuilder -from tensorwaves.physics.helicity_formalism.kinematics import ( - HelicityKinematics, -) - -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" - - -NUMBER_OF_PHSP_EVENTS = 10 - - -def _generate_phsp(recipe: es.AmplitudeModel, number_of_events: int): - kinematics = HelicityKinematics.from_model(recipe) - phsp_sample = generate_phsp(number_of_events, kinematics) - return phsp_sample - - -def test_helicity(helicity_model: es.AmplitudeModel): - # https://github.com/ComPWA/tensorwaves/issues/171 - model = deepcopy(helicity_model) - kinematics = HelicityKinematics.from_model(model) - masses_is = kinematics.reaction_kinematics_info.initial_state_masses - masses_fs = kinematics.reaction_kinematics_info.final_state_masses - assert masses_is == [3.0969] - assert masses_fs == [0.0, 0.1349768, 0.1349768] - - phsp_sample = _generate_phsp(model, NUMBER_OF_PHSP_EVENTS) - assert phsp_sample.shape == (3, NUMBER_OF_PHSP_EVENTS, 4) - - builder = IntensityBuilder(model.particles, kinematics, phsp_sample) - intensity = builder.create_intensity(model) - assert set(intensity.parameters) == { - "Magnitude_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;", - "Magnitude_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;", - "MesonRadius_J/psi(1S)", - "MesonRadius_f(0)(500)", - "MesonRadius_f(0)(980)", - "Phase_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;", - "Phase_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;", - "Position_J/psi(1S)", - "Position_f(0)(500)", - "Position_f(0)(980)", - "Width_J/psi(1S)", - "Width_f(0)(500)", - "Width_f(0)(980)", - } - - -def test_canonical(canonical_model: es.AmplitudeModel): - # https://github.com/ComPWA/tensorwaves/issues/171 - model = deepcopy(canonical_model) - particles = model.particles - kinematics = HelicityKinematics.from_model(model) - phsp_sample = _generate_phsp(model, NUMBER_OF_PHSP_EVENTS) - builder = IntensityBuilder(particles, kinematics, phsp_sample) - intensity = builder.create_intensity(model) - assert set(intensity.parameters) == { - "Magnitude_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;", - "Magnitude_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;", - "MesonRadius_J/psi(1S)", - "MesonRadius_f(0)(500)", - "MesonRadius_f(0)(980)", - "Phase_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;", - "Phase_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;", - "Position_J/psi(1S)", - "Position_f(0)(500)", - "Position_f(0)(980)", - "Width_J/psi(1S)", - "Width_f(0)(500)", - "Width_f(0)(980)", - } diff --git a/tests/test_estimator.py b/tests/test_estimator.py index 27d14a8a..09a11009 100644 --- a/tests/test_estimator.py +++ b/tests/test_estimator.py @@ -1,21 +1,186 @@ -from tensorwaves.estimator import UnbinnedNLL - - -class TestUnbinnedNLL: - @staticmethod - def test_parameters(estimator: UnbinnedNLL): - assert estimator.parameters == { - "MesonRadius_J/psi(1S)": 1.0, - "MesonRadius_f(0)(500)": 1.0, - "MesonRadius_f(0)(980)": 1.0, - "Magnitude_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;": 1.0, - 
"Phase_J/psi(1S)_to_f(0)(500)_0+gamma_1;f(0)(500)_to_pi0_0+pi0_0;": 0.0, - "Magnitude_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;": 1.0, - "Phase_J/psi(1S)_to_f(0)(980)_0+gamma_1;f(0)(980)_to_pi0_0+pi0_0;": 0.0, - "Position_J/psi(1S)": 3.0969, - "Width_J/psi(1S)": 9.29e-05, - "Position_f(0)(500)": 0.475, - "Width_f(0)(500)": 0.55, - "Position_f(0)(980)": 0.99, - "Width_f(0)(980)": 0.06, +# pylint: disable=invalid-name, redefined-outer-name + +import math +from typing import Dict, Union + +import numpy as np +import pytest +import sympy as sp +from expertsystem.amplitude.data import DataSet + +from tensorwaves.estimator import SympyUnbinnedNLL +from tensorwaves.optimizer.minuit import Minuit2 +from tensorwaves.physics.amplitude import SympyModel + + +def gaussian(mu_: float, sigma_: float) -> SympyModel: + x, mu, sigma = sp.symbols("x, mu, sigma") + return SympyModel( + expression=(sp.exp(-(((x - mu) / sigma) ** 2) / 2)), + parameters={ + mu: mu_, + sigma: sigma_, + }, + ) + + +def gaussian_sum( + a_1: float, + mu_1: float, + sigma_1: float, + a_2: float, + mu_2: float, + sigma_2: float, +) -> SympyModel: + x, a1, mu1, sigma1, a2, mu2, sigma2 = sp.symbols( + "x, a1, mu1, sigma1, a2, mu2, sigma2" + ) + gaussian1 = ( + a1 + / (sigma1 * sp.sqrt(2.0 * math.pi)) + * sp.exp(-(((x - mu1) / sigma1) ** 2) / 2) + ) + gaussian2 = ( + a2 + / (sigma2 * sp.sqrt(2.0 * math.pi)) + * sp.exp(-(((x - mu2) / sigma2) ** 2) / 2) + ) + + return SympyModel( + expression=gaussian1 + gaussian2, + parameters={ + a1: a_1, + mu1: mu_1, + sigma1: sigma_1, + a2: a_2, + mu2: mu_2, + sigma2: sigma_2, + }, + ) + + +@pytest.fixture(scope="module") +def phsp_dataset() -> DataSet: + rng = np.random.default_rng(12345) + return DataSet( + { + "x": rng.uniform(low=-2.0, high=5.0, size=10000), } + ) + + +__np_rng = np.random.default_rng(12345) + + +@pytest.mark.parametrize( + "model, dataset, true_params", + [ + ( + gaussian(1.0, 0.1), + DataSet( + { + "x": __np_rng.normal(0.5, 0.1, 1000), + } + ), + {"mu": 0.5}, + ), + ( + gaussian(1.0, 0.1), + DataSet( + { + "x": __np_rng.normal(0.5, 0.3, 1000), + } + ), + {"mu": 0.5, "sigma": 0.3}, + ), + ( + gaussian_sum(1.0, 1.0, 0.1, 2.0, 2.0, 0.3), + DataSet( + { + "x": np.append( + __np_rng.normal( + 1.0, + 0.1, + 2000, + ), + __np_rng.normal( + 2.0, + 0.3, + 1000, + ), + ) + } + ), + { + "a2": 0.5 + }, # ratio should be A1/A2 = 2000/1000 -- A1=1 --> A2=0.5 + ), + ( + gaussian_sum(1.0, 1.0, 0.1, 1.0, 2.0, 0.3), + DataSet( + { + "x": np.append( + __np_rng.normal( + 0.9, + 0.3, + 1000, + ), + __np_rng.normal( + 2.5, + 0.1, + 1000, + ), + ) + } + ), + {"mu1": 0.9, "sigma1": 0.3, "mu2": 2.5, "sigma2": 0.1}, + ), + ( + gaussian_sum(1.0, 1.0, 0.1, 2.0, 2.5, 0.3), + DataSet( + { + "x": np.append( + __np_rng.normal( + 0.9, + 0.3, + 2000, + ), + __np_rng.normal( + 2.5, + 0.1, + 1000, + ), + ) + } + ), + {"mu1": 0.9, "sigma1": 0.3, "a2": 0.5, "sigma2": 0.1}, + ), + ], +) +def test_sympy_unbinned_nll( + model: SympyModel, + dataset: DataSet, + true_params: Dict[str, Union[complex, float]], + phsp_dataset: DataSet, +): + estimator = SympyUnbinnedNLL( + model, + dataset, + phsp_dataset, + phsp_volume=6.0, + ) + minuit2 = Minuit2() + result = minuit2.optimize( + estimator, + initial_parameters=true_params, + ) + + par_values = result["parameter_values"] + par_errors = result["parameter_errors"] + + assert set(par_values) == set(true_params) + for par_name, par_value in true_params.items(): + assert ( + abs(par_values[par_name] - par_value) < 4.0 * par_errors[par_name] + ) + assert 
par_value == pytest.approx(par_values[par_name], rel=0.1)
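
The conftest.py hunk at the top of this section replaces model.dynamics.set_breit_wigner(name) with model_builder.set_dynamics(name, create_relativistic_breit_wigner_with_ff). The lineshape being attached is, up to the Blatt-Weisskopf form factor that the _with_ff variant presumably adds, the standard relativistic Breit-Wigner, which in sympy notation (the symbols s, m0, Gamma0 are chosen here for illustration) reads:

import sympy as sp

# s: invariant mass squared of the decaying system
# m0, Gamma0: pole mass and width of the resonance
s, m0, gamma0 = sp.symbols("s m0 Gamma0", real=True, positive=True)
relativistic_breit_wigner = 1 / (m0 ** 2 - s - sp.I * m0 * gamma0)

This matches the m_f(0)(...) and Gamma_f(0)(...) parameters that the new tests/physics/test_amplitude.py expects in the model; the d_f(0)(...) parameters are presumably the meson radii entering the form factor.
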
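The rewritten expectations in tests/data/test_generate.py change two things at once: the sample is now a MomentumPool, a mapping of final-state ID onto an (n_events, 4) array instead of one (n_final_states, n_events, 4) array, and, as a comparison of old and new rows shows, the four-momentum convention flipped from (px, py, pz, E) to (E, px, py, pz). A one-line converter between the two orderings (an illustrative helper, not part of the package):

import numpy as np

def to_energy_first(four_momenta: np.ndarray) -> np.ndarray:
    """Reorder (px, py, pz, E) rows into (E, px, py, pz)."""
    return np.roll(four_momenta, shift=1, axis=-1)
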
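The deterministic sample in tests/data/test_tf_phasespace.py draws J/psi(1S) -> K0 Sigma+ p~ events with a seeded TFUniformRealNumberGenerator, which is what makes the asserted momenta reproducible. Under the hood this is plain n-body phase-space generation; a hedged sketch with the phasespace package that tensorwaves builds on (the exact call signature here is an assumption):

import phasespace

JPSI_MASS = 3.0969  # GeV
FINAL_STATE_MASSES = [0.497611, 1.18937, 0.938272]  # K0, Sigma+, p~ in GeV

decay = phasespace.nbody_decay(JPSI_MASS, FINAL_STATE_MASSES)
weights, particles = decay.generate(n_events=5)
# particles maps a particle name onto a (5, 4) tensor of four-momenta
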
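tests/optimizer/test_gradient.py validates gradient_creator(function, backend="jax") against analytic derivatives of 1D and 2D polynomials. Assuming the creator wraps jax automatic differentiation over a {name: value} mapping (Python dicts are pytrees, so jax.grad handles them directly), the check reduces to the following, shown for the a=2, b=3, c=5 coefficients of the first test case:

import jax

def function(parameters):
    x = parameters["x"]
    return 2.0 * x * x + 3.0 * x + 5.0  # f(x) = a x**2 + b x + c

gradient = jax.grad(function)  # returns a dict with the same keys as the input

params = {"x": 0.5}
assert abs(gradient(params)["x"] - (2.0 * 2.0 * 0.5 + 3.0)) < 1e-10

The strict equality assert in the test itself can only hold because automatic differentiation evaluates the derivative from the same primitive operations; a finite-difference gradient would need a tolerance.
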
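The rewritten tests/optimizer/test_minuit.py drives the Minuit2 wrapper with simple polynomial estimators. Note the cubic case started at x = -2.0: there the derivative 3x**2 + 2(x - 1) - 3 is positive, so MIGRAD runs downhill towards minus infinity and the expected outcome is an invalid minimum, which the test checks via result["minimum_valid"]. For reference, the equivalent direct usage of iminuit (the 2.x API, which the wrapper is assumed to delegate to) is roughly:

from iminuit import Minuit

def estimator(x):
    return x ** 2 - 1  # parabola with its minimum at x = 0

minuit = Minuit(estimator, x=0.5)
minuit.errordef = Minuit.LEAST_SQUARES  # error definition for a chi-square-like cost
minuit.migrad()
print(minuit.values["x"], minuit.errors["x"], minuit.valid)
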
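tests/optimizer/test_parameter_flattener.py pins down the convention for fitting complex-valued parameters: Minuit only varies real numbers, so each complex parameter is split into a real_<name>/imag_<name> pair before the fit and recombined afterwards. A minimal sketch of that round trip (the actual ParameterFlattener in tensorwaves.optimizer.minuit may differ in details):

def flatten(parameters: dict) -> dict:
    # Split every complex parameter into two real-valued ones
    flattened = {}
    for name, value in parameters.items():
        if isinstance(value, complex):
            flattened[f"real_{name}"] = value.real
            flattened[f"imag_{name}"] = value.imag
        else:
            flattened[name] = value
    return flattened

def unflatten(parameters: dict) -> dict:
    # Recombine real_/imag_ pairs into complex parameters
    unflattened = {
        name: value
        for name, value in parameters.items()
        if not name.startswith(("real_", "imag_"))
    }
    for name, value in parameters.items():
        if name.startswith("real_"):
            original_name = name[len("real_") :]
            unflattened[original_name] = complex(
                value, parameters[f"imag_{original_name}"]
            )
    return unflattened

assert flatten({"var1": 0.5 + 2j, "var2": -1.2}) == {
    "real_var1": 0.5,
    "imag_var1": 2.0,
    "var2": -1.2,
}
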
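In tests/physics/helicity_formalism/test_kinematics.py the kinematic variables were renamed from mSq_* to m_*, so the converter now yields invariant masses rather than their squares; the hardcoded values are the photon mass (0.0) and the pi0 mass (0.1349768 GeV) for final states 2, 3 and 4. With the energy-first convention noted above, computing such a variable is a one-liner per subsystem; a sketch:

import numpy as np

def invariant_mass(four_momenta: np.ndarray) -> np.ndarray:
    """Invariant mass of (n_events, 4) arrays in (E, px, py, pz) order."""
    energy = four_momenta[:, 0]
    momentum_squared = np.sum(four_momenta[:, 1:] ** 2, axis=1)
    return np.sqrt(energy ** 2 - momentum_squared)

# e.g. the "m_3+4" variable corresponds to invariant_mass(sample[3] + sample[4])
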
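tests/physics/test_amplitude.py evaluates a SympyModel whose intensity is the absolute square of an amplitude with complex coefficients. The core mechanism, building |A|**2 symbolically and lambdifying it into an array-valued function, can be sketched as follows (a toy amplitude, not the fixture's exact expression):

import numpy as np
import sympy as sp

x = sp.Symbol("x", positive=True)  # e.g. an invariant mass, hence positive
amplitude = (1 + 1j) * sp.sqrt(x) / x + (1 - 1j) * (x ** 2 - 3 * x)
intensity = sp.simplify(sp.conjugate(amplitude) * amplitude)
evaluate = sp.lambdify(x, intensity, "numpy")
print(evaluate(np.array([0.5, 1.0, 1.5])))

Because the intensity is conjugate(A) * A, its imaginary part cancels exactly, which is what the simplification step in the fixture relies on as well.
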
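Finally, tests/test_estimator.py fits Gaussians and Gaussian sums with SympyUnbinnedNLL, passing the data, a uniform phase-space sample over [-2, 5] and phsp_volume=6.0. The quantity presumably being minimized is an unbinned negative log-likelihood whose normalization integral is estimated by Monte Carlo over the phase-space sample; with illustrative names:

import numpy as np

def unbinned_nll(intensity, data, phsp, phsp_volume=1.0):
    """Negative log-likelihood with a Monte Carlo normalization integral."""
    normalization = phsp_volume * np.mean(intensity(phsp))  # V * <I>_phsp
    return -np.sum(np.log(intensity(data) / normalization))

Note that the volume only enters as the constant offset n_events * log(V), so it does not shift the location of the minimum; this is why the fits recover the true parameters even though the sampled interval actually has length 7 rather than 6.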