From 855ce5fbc439f5bc5b2cc9400dfffeb865d5828c Mon Sep 17 00:00:00 2001 From: Marc van der Meide Date: Tue, 26 Sep 2023 09:29:49 +0200 Subject: [PATCH 01/72] Delete .github/workflows/install-canary.yaml --- .github/workflows/install-canary.yaml | 40 --------------------------- 1 file changed, 40 deletions(-) delete mode 100644 .github/workflows/install-canary.yaml diff --git a/.github/workflows/install-canary.yaml b/.github/workflows/install-canary.yaml deleted file mode 100644 index 8aaeb540f..000000000 --- a/.github/workflows/install-canary.yaml +++ /dev/null @@ -1,40 +0,0 @@ -name: canary installation -on: - schedule: - # Run the tests once every 24 hours to catch dependency problems early - - cron: '0 7 * * *' - push: - branches: - - install-canary - -jobs: - canary-installs: - runs-on: ${{ matrix.os }} - timeout-minutes: 12 - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ['3.8', '3.9'] - defaults: - run: - shell: bash -l {0} - steps: - - name: Setup python ${{ matrix.python-version }} conda environment - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install activity-browser - run: | - conda create -y -n ab -c conda-forge activity-browser python=${{ matrix.python-version }} - - name: Environment info - run: | - conda activate ab - conda list - conda env export - conda env export -f env.yaml - - name: Upload final environment as artifact - uses: actions/upload-artifact@v2 - with: - name: env-${{ matrix.os }}-${{ matrix.python-version }} - path: env.yaml From 79999447717d949a41a51be501640b8da87998d4 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Mon, 4 Dec 2023 20:15:05 +0100 Subject: [PATCH 02/72] GH action script for commenting on issues when related milestone is closed. 
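The script's core step is finding a closed milestone whose title matches the milestone that was just attached to the issue. A rough standalone sketch of that lookup against the public GitHub REST API (the milestone title and the unauthenticated request are illustrative assumptions only, not part of the workflow):

```python
# Sketch: list closed milestones and match one by title.
import requests

owner, repo = "LCA-ActivityBrowser", "activity-browser"
milestone_title = "2.9.0"  # hypothetical value; the workflow reads it from the issue payload

resp = requests.get(
    f"https://api.github.com/repos/{owner}/{repo}/milestones",
    params={"state": "closed"},  # only closed milestones should trigger a comment
)
resp.raise_for_status()

closed_milestone = next(
    (m for m in resp.json() if m["title"] == milestone_title), None
)
if closed_milestone:
    print(f"Issue would be commented on: resolved in version {milestone_title}")
```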
---
 .../workflows/comment-milestoned-issues.yaml | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 .github/workflows/comment-milestoned-issues.yaml

diff --git a/.github/workflows/comment-milestoned-issues.yaml b/.github/workflows/comment-milestoned-issues.yaml
new file mode 100644
index 000000000..d8acfb3fa
--- /dev/null
+++ b/.github/workflows/comment-milestoned-issues.yaml
@@ -0,0 +1,33 @@
name: Comment when milestone is closed
on:
  issues:
    types: [milestoned]
jobs:
  comment:
    runs-on: ubuntu-latest
    steps:
      - name: Comment on issue
        uses: actions/github-script@v5
        with:
          script: |
            const issue_number = context.issue.number;
            const milestone_title = context.payload.issue.milestone.title;

            // Get all milestones
            const milestones = await github.rest.issues.listMilestones({
              owner: context.repo.owner,
              repo: context.repo.repo,
            });

            // Find the closed milestone that matches the title
            const closedMilestone = milestones.data.find(milestone => milestone.title === milestone_title && milestone.state === 'closed');

            if (closedMilestone) {
              // Post a comment on the issue
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue_number,
                body: `This issue has been resolved in Activity Browser [version ${milestone_title}](https://github.com/LCA-ActivityBrowser/activity-browser/releases/tag/${milestone_title}), please [update Activity Browser](https://github.com/LCA-ActivityBrowser/activity-browser#updating-the-ab).`,
              });
            }
\ No newline at end of file

From 9699278dd97fd7a2221c2032adb69779f642043d Mon Sep 17 00:00:00 2001
From: marc-vdm
Date: Sun, 17 Sep 2023 16:39:36 +0200
Subject: [PATCH 03/72] update bw2io requirement

---
 ci/recipe/stable/meta.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ci/recipe/stable/meta.yaml b/ci/recipe/stable/meta.yaml
index a7d97f13d..4353ce833 100644
--- a/ci/recipe/stable/meta.yaml
+++ b/ci/recipe/stable/meta.yaml
@@ -22,6 +22,7 @@ requirements:
   - setuptools
 run:
   - python >=3.8,<3.10
+  - bw2io >=0.8.9
   - arrow
   - brightway2 >=2.4.2
   - pyperclip

From 69f0a4e03367a52d317573130ba9b40abd21bb4f Mon Sep 17 00:00:00 2001
From: marc-vdm
Date: Sun, 17 Sep 2023 16:44:05 +0200
Subject: [PATCH 04/72] Update readme install instructions

---
 README.md | 37 -------------------------------------
 1 file changed, 37 deletions(-)

diff --git a/README.md b/README.md
index 9f923f167..66f0129d7 100644
--- a/README.md
+++ b/README.md
@@ -59,43 +59,6 @@ conda activate ab
activity-browser
```

## The thorough way

| :warning: The activity browser has dropped support for python versions below `3.8`|
|---|
| You should re-install if you have an older installation of the activity browser which doesn't use `python >= 3.8` (you can check with `conda list` or `python --version` in your conda environment). You can remove your existing environment with `conda remove -n ab --all` or choose a new environment name (instead of `ab`). Re-installing will not affect your activity-browser/brightway projects. |

### Conda

We recommend that you use **conda** to manage your python installation. You can install [Anaconda](https://www.anaconda.com/products/individual) or the more compact [miniconda](https://conda.io/miniconda.html) (Python 3 of course) for your operating system. Installation instructions for miniconda can be found [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html). 
See also the [conda user guide](https://docs.conda.io/projects/conda/en/latest/user-guide/index.html) or the [Conda cheat sheet](https://docs.conda.io/projects/conda/en/latest/_downloads/843d9e0198f2a193a3484886fa28163c/conda-cheatsheet.pdf). - -Skip this step if you already have a working installation of anaconda or miniconda, but make sure to keep your conda installation up-to-date: `conda update conda`. - -### Add the Conda-Forge channel -The activity-browser has many dependencies that are managed by the [conda-forge](https://conda.io/docs/user-guide/tasks/manage-channels.html) channel. Open a cmd-window or terminal (in Windows you may have to use the Anaconda prompt) and type the following: - -```bash -conda config --prepend channels conda-forge -``` -### Install the AB with ecoinvent >=3.9 -After prepending the Conda-Forge channel the following line should be executed within the command prompt/terminal to install the AB and it's dependencies. - -```bash -conda create -n ab activity-browser -``` -This will install the Activity Browser with the latest version of the Brightway2 libraries (currently excluding Brightway2.5 libraries). - -### Install the AB with older ecoinvent versions (<3.9) - -If you want to work with with older versions of ecoinvent (<3.9) in the AB, a different Biosphere3 database needs to be installed. This requires a _**different version of the bw2io library**_ to be installed, see also [here](https://github.com/brightway-lca/brightway2-io). Note that this version of bw2io can ONLY work with ecoinvent versions < 3.9. If you want to work with version > 3.9 AND < 3.9, the only solution currently available is to use two separate virtual environments (i.e. two AB installations). - -To install a version of the AB that can handle ecoinvent versions <3.9, do the following: For a new installation from the conda-forge repository the same initial steps need to be made: Prepending the Conda-Forge repository in the channels, and installing the AB and dependencies. After the successful installation, the following two commands need to be executed before running the AB: 1) Remove the latest version of the Brightway2 Input-Output library, 2) Install an older version of the Brightway2 Input-Output library. - -```bash -conda remove --force bw2io -conda install bw2io=0.8.7 -``` - #### Activity Browser is installed At this point the activity-browser and all of its dependencies will be installed in a new conda environment called `ab`. You can change the environment name `ab` to whatever suits you. 
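A small aside to the two-environment setup described in the instructions removed above: before importing an ecoinvent release it can help to confirm which bw2io variant the active environment carries. A minimal sketch, assuming bw2io exposes its version tuple as `__version__`:

```python
# Check which bw2io variant is active before importing an ecoinvent release.
import bw2io

print(bw2io.__version__)
# A 0.8.7-style version implies the legacy Biosphere3 (ecoinvent < 3.9);
# newer releases target the updated biosphere (ecoinvent >= 3.9).
```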
From a7fbd60fe80f7a592a9e9643818f264923480db2 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Sun, 17 Sep 2023 16:47:25 +0200 Subject: [PATCH 05/72] Update meta.yaml --- ci/recipe/stable/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/recipe/stable/meta.yaml b/ci/recipe/stable/meta.yaml index 4353ce833..9041ddfad 100644 --- a/ci/recipe/stable/meta.yaml +++ b/ci/recipe/stable/meta.yaml @@ -22,7 +22,7 @@ requirements: - setuptools run: - python >=3.8,<3.10 - - bw2io >=0.8.9 + - bw2io >=0.8.10 - arrow - brightway2 >=2.4.2 - pyperclip From c8fe9549f3f586a840ace5454eae17213028e012 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Sun, 17 Sep 2023 23:03:40 +0200 Subject: [PATCH 06/72] Update README.md --- README.md | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 66f0129d7..469f36e9b 100644 --- a/README.md +++ b/README.md @@ -27,11 +27,6 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa # Contents - [Installation](#installation) - - [The quick way](#the-quick-way) - - [The thorough way](#the-thorough-way) - - [Conda](#conda) - - [Install the AB with ecoinvent >=3.9](#install-the-ab-with-ecoinvent-39) - - [Install the AB with ecoinvent <3.9](#install-the-ab-with-older-ecoinvent-versions-39) - [Updating the AB](#updating-the-ab) - [Getting started](#getting-started) - [Running the AB](#running-the-ab) @@ -49,8 +44,6 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa # Installation -## The quick way - You can install and start the activity-browser like this: ```bash @@ -118,13 +111,11 @@ The plugin code has been designed and written by Remy le Calloch (supported by [ These are the plugins that we know about. To add your plugin to this list either open an issue, or a pull request. All submitted plugins will be reviewed, although all risks associated with their use shall be born by the user. 
-| Name | Description | Links | Author(s) | -|:---------|-------------|-------|-----------| -| Notebook | Use Jupyter notebooks from AB | [anaconda](https://anaconda.org/pan6ora/ab-plugin-template), [github](https://github.com/Pan6ora/ab-plugin-Notebook) | Rémy Le Calloch | -| ReSICLED | Evaluating the recyclability of electr(on)ic product for improving product design | [anaconda](https://anaconda.org/pan6ora/ab-plugin-resicled), [github](https://github.com/Pan6ora/ab-plugin-ReSICLED) | G-SCOP Laboratory | -| ScenarioLink | Enables you to seamlessly fetch and reproduce scenario-based LCA databases, such as those generated by [premise](https://github.com/polca/premise) | [anaconda](https://anaconda.org/romainsacchi/ab-plugin-scenariolink), [pypi](https://pypi.org/project/ab-plugin-scenariolink/), [github](https://github.com/polca/ScenarioLink) | Romain Sacchi & Marc van der Meide | +| Name | Description | Links | Author | +|:---------|-------------|-------|--------| | template | An empty plugin to start from | [anaconda](https://anaconda.org/pan6ora/ab-plugin-template), [github](https://github.com/Pan6ora/activity-browser-plugin-template) | Rémy Le Calloch | - +| Notebook | Use Jupyter notebooks from AB | [anaconda](https://anaconda.org/pan6ora/ab-plugin-notebook), [github](https://github.com/Pan6ora/ab-plugin-Notebook) | Rémy Le Calloch | +| ReSICLED | Evaluating the recyclability of electr(on)ic product for improving product design | [anaconda](https://anaconda.org/pan6ora/ab-plugin-resicled), [github](https://github.com/Pan6ora/ab-plugin-ReSICLED) | G-SCOP Laboratory | ## Installation From 5cb49c7bd7c3b7f9884a502a383469b1f3f40dd2 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Mon, 18 Sep 2023 10:04:07 +0200 Subject: [PATCH 07/72] Change requirement to brightway2 instead of bw2io --- ci/recipe/stable/meta.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ci/recipe/stable/meta.yaml b/ci/recipe/stable/meta.yaml index 9041ddfad..4b3ffc575 100644 --- a/ci/recipe/stable/meta.yaml +++ b/ci/recipe/stable/meta.yaml @@ -22,9 +22,8 @@ requirements: - setuptools run: - python >=3.8,<3.10 - - bw2io >=0.8.10 - arrow - - brightway2 >=2.4.2 + - brightway2 >=2.4.4 - pyperclip - eidl >=1.4.2 - networkx From 7da0e5c495ab3fa1486972bd9e4e26b6fc87c507 Mon Sep 17 00:00:00 2001 From: bsteubing <33026150+bsteubing@users.noreply.github.com> Date: Mon, 25 Sep 2023 15:52:23 +0200 Subject: [PATCH 08/72] Update README.md --- README.md | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 469f36e9b..fe1e6f730 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,11 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa # Contents - [Installation](#installation) + - [The quick way](#the-quick-way) + - [The thorough way](#the-thorough-way) + - [Conda](#conda) + - [Install the AB with ecoinvent >=3.9](#install-the-ab-with-ecoinvent-39) + - [Install the AB with ecoinvent <3.9](#install-the-ab-with-older-ecoinvent-versions-39) - [Updating the AB](#updating-the-ab) - [Getting started](#getting-started) - [Running the AB](#running-the-ab) @@ -44,6 +49,8 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa # Installation +## The quick way + You can install and start the activity-browser like this: ```bash @@ -52,6 +59,43 @@ conda activate ab activity-browser ``` +## The thorough way + +| :warning: The activity browser has dropped 
support for python versions below `3.8`| +|---| +| You should re-install if you have an older installation of the activity browser which doesn't use `python >= 3.8` (you can check with `conda list` or `python --version` in your conda environment). You can remove your existing environment with `conda remove -n ab --all` or choose a new environment name (instead of `ab`). Re-installing will not affect your activity-browser/brightway projects. | + +### Conda + +We recommend that you use **conda** to manage your python installation. You can install [Anaconda](https://www.anaconda.com/products/individual) or the more compact [miniconda](https://conda.io/miniconda.html) (Python 3 of course) for your operating system. Installation instructions for miniconda can be found [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html). See also the [conda user guide](https://docs.conda.io/projects/conda/en/latest/user-guide/index.html) or the [Conda cheat sheet](https://docs.conda.io/projects/conda/en/latest/_downloads/843d9e0198f2a193a3484886fa28163c/conda-cheatsheet.pdf). + +Skip this step if you already have a working installation of anaconda or miniconda, but make sure to keep your conda installation up-to-date: `conda update conda`. + +### Add the Conda-Forge channel +The activity-browser has many dependencies that are managed by the [conda-forge](https://conda.io/docs/user-guide/tasks/manage-channels.html) channel. Open a cmd-window or terminal (in Windows you may have to use the Anaconda prompt) and type the following: + +```bash +conda config --prepend channels conda-forge +``` +### Install the AB with ecoinvent >=3.9 +After prepending the Conda-Forge channel the following line should be executed within the command prompt/terminal to install the AB and it's dependencies. + +```bash +conda create -n ab activity-browser +``` +This will install the Activity Browser with the latest version of the Brightway2 libraries (currently excluding Brightway2.5 libraries). + +### Install the AB with older ecoinvent versions (<3.9) + +If you want to work with with older versions of ecoinvent (<3.9) in the AB, a different Biosphere3 database needs to be installed. This requires a _**different version of the bw2io library**_ to be installed, see also [here](https://github.com/brightway-lca/brightway2-io). Note that this version of bw2io can ONLY work with ecoinvent versions < 3.9. If you want to work with version > 3.9 AND < 3.9, the only solution currently available is to use two separate virtual environments (i.e. two AB installations). + +To install a version of the AB that can handle ecoinvent versions <3.9, do the following: For a new installation from the conda-forge repository the same initial steps need to be made: Prepending the Conda-Forge repository in the channels, and installing the AB and dependencies. After the successful installation, the following two commands need to be executed before running the AB: 1) Remove the latest version of the Brightway2 Input-Output library, 2) Install an older version of the Brightway2 Input-Output library. + +```bash +conda remove --force bw2io +conda install bw2io=0.8.7 +``` + #### Activity Browser is installed At this point the activity-browser and all of its dependencies will be installed in a new conda environment called `ab`. You can change the environment name `ab` to whatever suits you. @@ -111,11 +155,13 @@ The plugin code has been designed and written by Remy le Calloch (supported by [ These are the plugins that we know about. 
To add your plugin to this list either open an issue, or a pull request. All submitted plugins will be reviewed, although all risks associated with their use shall be borne by the user.

| Name | Description | Links | Author(s) |
|:---------|-------------|-------|-----------|
| [ScenarioLink](https://github.com/polca/ScenarioLink) | Enables you to seamlessly fetch and reproduce scenario-based LCA databases, such as those generated by [premise](https://github.com/polca/premise) | [anaconda](https://anaconda.org/romainsacchi/ab-plugin-scenariolink), [pypi](https://pypi.org/project/ab-plugin-scenariolink/), [github](https://github.com/polca/ScenarioLink) | Romain Sacchi & Marc van der Meide |
| [ReSICLED](https://github.com/Pan6ora/ab-plugin-ReSICLED) | Evaluating the recyclability of electr(on)ic product for improving product design | [anaconda](https://anaconda.org/pan6ora/ab-plugin-resicled), [github](https://github.com/Pan6ora/ab-plugin-ReSICLED) | G-SCOP Laboratory |
| [Notebook](https://github.com/Pan6ora/ab-plugin-Notebook) | Use Jupyter notebooks from AB | [anaconda](https://anaconda.org/pan6ora/ab-plugin-notebook), [github](https://github.com/Pan6ora/ab-plugin-Notebook) | Rémy Le Calloch |
| [template](https://github.com/Pan6ora/activity-browser-plugin-template) | An empty plugin to start from | [anaconda](https://anaconda.org/pan6ora/ab-plugin-template), [github](https://github.com/Pan6ora/activity-browser-plugin-template) | Rémy Le Calloch |

## Installation

From a76fd33b13430b70eb75e5e65e5b35a0b9575b83 Mon Sep 17 00:00:00 2001
From: zoophobus
Date: Fri, 22 Sep 2023 14:32:38 +0200
Subject: [PATCH 09/72] Increases the flexibility of the excel importer so it can also handle the parameter scenario files.
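In effect, the header lookup now accepts any early row whose first cell holds a string, rather than only a row starting with "from activity name". A standalone sketch of the relaxed check (the function name and plain-openpyxl framing are illustrative; the actual change is in `get_header_index` below):

```python
# Sketch: return the index of the first row whose first cell is a string,
# scanning the first ten rows of the sheet.
from openpyxl import load_workbook

def find_header_index(document_path, sheet_index=0):
    wb = load_workbook(document_path, read_only=True)
    try:
        sheet = wb.worksheets[sheet_index]
        for i in range(10):
            value = sheet.cell(i + 1, 1).value  # column A, rows 1..10
            if isinstance(value, str):  # flow header or parameter header
                return i
    finally:
        wb.close()
```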
--- activity_browser/bwutils/superstructure/excel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/activity_browser/bwutils/superstructure/excel.py b/activity_browser/bwutils/superstructure/excel.py index 440c1eb9c..9d6435c24 100644 --- a/activity_browser/bwutils/superstructure/excel.py +++ b/activity_browser/bwutils/superstructure/excel.py @@ -38,7 +38,7 @@ def get_header_index(document_path: Union[str, Path], import_sheet: int): sheet = wb.worksheets[import_sheet] for i in range(10): value = sheet.cell(i + 1, 1).value - if isinstance(value, str) and value.startswith("from activity name"): + if isinstance(value, str): wb.close() return i except IndexError as e: From 306d2ff0d1f271bd3c0e09a3a2f0fe9cbbd3fec4 Mon Sep 17 00:00:00 2001 From: bsteubing <33026150+bsteubing@users.noreply.github.com> Date: Wed, 27 Sep 2023 08:58:56 +0200 Subject: [PATCH 10/72] Update README.md How to install using Mamba --- README.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fe1e6f730..1b30a0505 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,8 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa - [Conda](#conda) - [Install the AB with ecoinvent >=3.9](#install-the-ab-with-ecoinvent-39) - [Install the AB with ecoinvent <3.9](#install-the-ab-with-older-ecoinvent-versions-39) -- [Updating the AB](#updating-the-ab) + - [Updating the AB](#updating-the-ab) + - [Mamba](#mamba) - [Getting started](#getting-started) - [Running the AB](#running-the-ab) - [Importing LCI databases](#importing-lci-databases) @@ -100,7 +101,7 @@ conda install bw2io=0.8.7 At this point the activity-browser and all of its dependencies will be installed in a new conda environment called `ab`. You can change the environment name `ab` to whatever suits you. -# Updating the AB +## Updating the AB We recommend to regularly update the AB to receive new features & bugfixes. These commands will update the activity-browser and all of its dependencies in the conda environment called `ab`. 
@@ -109,6 +110,16 @@ conda activate ab conda update activity-browser ``` +## Mamba + +You can also install the AB using [Mamba](https://mamba.readthedocs.io/en/latest/mamba-installation.html#mamba-install): + +```bash +mamba create -n ab activity-browser +mamba activate ab +activity-browser +``` + # Getting started ## Running the AB From b2f30cfb5e7c20358b4f23a8a8e430b163cb12a4 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Fri, 22 Sep 2023 15:02:49 +0200 Subject: [PATCH 11/72] Set up signals infrascructure --- activity_browser/controllers/activity.py | 40 +++++++++++++++++++ activity_browser/signals.py | 1 + activity_browser/ui/tables/inventory.py | 21 ++++++++++ .../ui/tables/models/inventory.py | 3 ++ 4 files changed, 65 insertions(+) diff --git a/activity_browser/controllers/activity.py b/activity_browser/controllers/activity.py index 50e13648f..50a8650c1 100644 --- a/activity_browser/controllers/activity.py +++ b/activity_browser/controllers/activity.py @@ -25,6 +25,7 @@ def __init__(self, parent=None): signals.delete_activity.connect(self.delete_activity) signals.delete_activities.connect(self.delete_activity) signals.duplicate_activity.connect(self.duplicate_activity) + signals.duplicate_activity_new_loc.connect(self.duplicate_activity_new_loc) signals.duplicate_activities.connect(self.duplicate_activity) signals.duplicate_to_db_interface.connect(self.show_duplicate_to_db_interface) signals.duplicate_to_db_interface_multiple.connect(self.show_duplicate_to_db_interface) @@ -139,6 +140,45 @@ def duplicate_activity(self, data: Union[tuple, Iterator[tuple]]) -> None: signals.database_changed.emit(db) signals.databases_changed.emit() + @Slot(tuple, name="copyActivityNewLoc") + def duplicate_activity_new_loc(self, data: tuple) -> None: + """Duplicates the selected activity in the same db, links to new location, with a new BW code. + + This function will try and link all exchanges in the same location as the production process + to a chosen location, if none is available for the given exchange, it will try to link to + RoW and then GLO, if those don't exist, the exchange is not altered. 
+ """ + #TODO actually write def, this is just a copy of duplicate_activity above + activities = self._retrieve_activities(data) + + # See also https://github.com/LCA-ActivityBrowser/activity-browser/issues/1042 for what to do + # get list of dependent databases for activity and load to MetaDataStore + # get list of all unique locations in the dependent databases + # trigger dialog with autocomplete-writeable-dropdown-list (sorted alphabetically) + # check every exchange (act.technosphere) whether it can be replaced + # write a def that tries to find the processes and potential alternatives + + for act in activities: + new_code = self.generate_copy_code(act.key) + new_act = act.copy(new_code) + # Update production exchanges + for exc in new_act.production(): + if exc.input.key == act.key: + exc.input = new_act + exc.save() + # Update 'products' + for product in new_act.get('products', []): + if product.get('input') == act.key: + product['input'] = new_act.key + new_act.save() + AB_metadata.update_metadata(new_act.key) + signals.safe_open_activity_tab.emit(new_act.key) + + db = next(iter(activities)).get("database") + bw.databases.set_modified(db) + signals.database_changed.emit(db) + signals.databases_changed.emit() + @Slot(tuple, str, name="copyActivityToDbInterface") @Slot(list, str, name="copyActivitiesToDbInterface") def show_duplicate_to_db_interface(self, data: Union[tuple, Iterator[tuple]], diff --git a/activity_browser/signals.py b/activity_browser/signals.py index 25d8ae651..5eb4a15ab 100644 --- a/activity_browser/signals.py +++ b/activity_browser/signals.py @@ -44,6 +44,7 @@ class Signals(QObject): new_activity = Signal(str) # Trigger dialog to create a new activity in this database | name of database add_activity_to_history = Signal(tuple) # Add this activity to history | key of activity duplicate_activity = Signal(tuple) # Duplicate this activity | key of activity + duplicate_activity_new_loc = Signal(tuple) # Duplicate this activity to a new location | key of activity duplicate_activities = Signal(list) # Duplicate these activities | list of activity keys duplicate_activity_to_db = Signal(str, object) # Duplicate this activity to another database | name of target database, BW2 actiivty object #TODO write below 2 signals to work without the str, source database is already stored in activity keys diff --git a/activity_browser/ui/tables/inventory.py b/activity_browser/ui/tables/inventory.py index cf5cda4d2..4fa225c6b 100644 --- a/activity_browser/ui/tables/inventory.py +++ b/activity_browser/ui/tables/inventory.py @@ -113,6 +113,12 @@ def __init__(self, parent=None): self.duplicate_activity_action = QtWidgets.QAction( qicons.copy, "Duplicate activity/-ies", None ) + self.duplicate_activity_new_loc_action = QtWidgets.QAction( + qicons.copy, "Duplicate activity to new location", None + ) + self.duplicate_activity_new_loc_action.setToolTip( + "Duplicate this activity to another location.\n" + "Link the exchanges to a new location if it is availabe.") self.delete_activity_action = QtWidgets.QAction( qicons.delete, "Delete activity/-ies", None ) @@ -135,6 +141,13 @@ def contextMenuEvent(self, event) -> None: if self.indexAt(event.pos()).row() == -1 and len(self.model._dataframe) != 0: return + if self.selectedIndexes() > 1: + act = 'activities' + else: + act = 'activity' + self.duplicate_activity_action.setText("Duplicate {}".format(act)) + self.delete_activity_action.setText("Delete {}".format(act)) + menu = QtWidgets.QMenu() if len(self.model._dataframe) == 0: # if the database 
is empty, only add the 'new' activity option and return @@ -151,6 +164,9 @@ def contextMenuEvent(self, event) -> None: ) menu.addAction(self.new_activity_action) menu.addAction(self.duplicate_activity_action) + menu.addAction(self.duplicate_activity_new_loc_action) + if len(self.selectedIndexes()) > 1: + self.duplicate_activity_new_loc_action.setEnabled(False) menu.addAction(self.delete_activity_action) menu.addAction( qicons.edit, "Relink the activity exchanges", @@ -176,6 +192,7 @@ def connect_signals(self): lambda: signals.new_activity.emit(self.database_name) ) self.duplicate_activity_action.triggered.connect(self.duplicate_activities) + self.duplicate_activity_new_loc_action.triggered.connect(self.duplicate_activity_to_new_loc) self.delete_activity_action.triggered.connect(self.delete_activities) self.copy_exchanges_for_SDF_action.triggered.connect(self.copy_exchanges_for_SDF) self.doubleClicked.connect(self.open_activity_tab) @@ -217,6 +234,10 @@ def delete_activities(self) -> None: def duplicate_activities(self) -> None: self.model.duplicate_activities(self.selectedIndexes()) + @Slot(name="duplicateActivitiesToNewLocWithinDb") + def duplicate_activity_to_new_loc(self) -> None: + self.model.duplicate_activity_to_new_loc(self.selectedIndexes()) + @Slot(name="duplicateActivitiesToOtherDb") def duplicate_activities_to_db(self) -> None: self.model.duplicate_activities_to_db(self.selectedIndexes()) diff --git a/activity_browser/ui/tables/models/inventory.py b/activity_browser/ui/tables/models/inventory.py index 10790ec0f..a0495d416 100644 --- a/activity_browser/ui/tables/models/inventory.py +++ b/activity_browser/ui/tables/models/inventory.py @@ -185,6 +185,9 @@ def duplicate_activities(self, proxies: list) -> None: else: signals.duplicate_activity.emit(self.get_key(proxies[0])) + def duplicate_activity_to_new_loc(self, proxies: list) -> None: + signals.duplicate_activity_new_loc.emit(self.get_key(proxies[0])) + def duplicate_activities_to_db(self, proxies: list) -> None: if len(proxies) > 1: keys = [self.get_key(p) for p in proxies] From 7cd8c5975556ed5dd01d4f6696f9b8cc3844c9ae Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Sat, 23 Sep 2023 12:06:31 +0200 Subject: [PATCH 12/72] Implement dialog --- activity_browser/controllers/activity.py | 75 ++++++++++++++++-------- activity_browser/ui/tables/inventory.py | 2 +- activity_browser/ui/widgets/__init__.py | 2 +- activity_browser/ui/widgets/dialog.py | 69 ++++++++++++++++++++++ 4 files changed, 122 insertions(+), 26 deletions(-) diff --git a/activity_browser/controllers/activity.py b/activity_browser/controllers/activity.py index 50a8650c1..33b88ae79 100644 --- a/activity_browser/controllers/activity.py +++ b/activity_browser/controllers/activity.py @@ -12,7 +12,7 @@ from activity_browser.settings import project_settings from activity_browser.signals import signals from activity_browser.ui.wizards import UncertaintyWizard -from ..ui.widgets import ActivityLinkingDialog, ActivityLinkingResultsDialog +from ..ui.widgets import ActivityLinkingDialog, ActivityLinkingResultsDialog, LocationLinkingDialog from .parameter import ParameterController @@ -149,35 +149,62 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: RoW and then GLO, if those don't exist, the exchange is not altered. 
""" #TODO actually write def, this is just a copy of duplicate_activity above + #TODO write such that it takes only one activity as input activities = self._retrieve_activities(data) - # See also https://github.com/LCA-ActivityBrowser/activity-browser/issues/1042 for what to do # get list of dependent databases for activity and load to MetaDataStore - # get list of all unique locations in the dependent databases - # trigger dialog with autocomplete-writeable-dropdown-list (sorted alphabetically) - # check every exchange (act.technosphere) whether it can be replaced - # write a def that tries to find the processes and potential alternatives + databases = [] for act in activities: - new_code = self.generate_copy_code(act.key) - new_act = act.copy(new_code) - # Update production exchanges - for exc in new_act.production(): - if exc.input.key == act.key: - exc.input = new_act - exc.save() - # Update 'products' - for product in new_act.get('products', []): - if product.get('input') == act.key: - product['input'] = new_act.key - new_act.save() - AB_metadata.update_metadata(new_act.key) - signals.safe_open_activity_tab.emit(new_act.key) + for exch in act.technosphere(): + databases.append(exch['input'][0]) + + # load all dependent databases to MetaDataStore + dbs = [AB_metadata.get_database_metadata(db) for db in databases] + # get list of all unique locations in the dependent databases (sorted alphabetically) + locations = [] + for db in dbs: + locations += db['location'].to_list() # add all locations to one list + locations = list(set(locations)) # reduce the list to only unique items + locations.sort() + + # get the location to relink + db = AB_metadata.get_database_metadata(act.key[0]) + old_location = db.loc[db['key'] == act.key]['location'][0] + + # trigger dialog with autocomplete-writeable-dropdown-list + options = (old_location, locations) + dialog = LocationLinkingDialog.relink_location(options, self.window) + relinking_results = dict() + if dialog.exec_() == LocationLinkingDialog.Accepted: + for old, new in dialog.relink.items(): + relinking_results[old] = new + use_alternatives = dialog.use_alternatives_checkbox.isChecked() - db = next(iter(activities)).get("database") - bw.databases.set_modified(db) - signals.database_changed.emit(db) - signals.databases_changed.emit() + # check every exchange (act.technosphere) whether it can be replaced + # write a def that tries to find the processes and potential alternatives + # use MetaDataStore to quickly find things + + # for act in activities: + # new_code = self.generate_copy_code(act.key) + # new_act = act.copy(new_code) + # # Update production exchanges + # for exc in new_act.production(): + # if exc.input.key == act.key: + # exc.input = new_act + # exc.save() + # # Update 'products' + # for product in new_act.get('products', []): + # if product.get('input') == act.key: + # product['input'] = new_act.key + # new_act.save() + # AB_metadata.update_metadata(new_act.key) + # signals.safe_open_activity_tab.emit(new_act.key) + # + # db = next(iter(activities)).get("database") + # bw.databases.set_modified(db) + # signals.database_changed.emit(db) + # signals.databases_changed.emit() @Slot(tuple, str, name="copyActivityToDbInterface") @Slot(list, str, name="copyActivitiesToDbInterface") diff --git a/activity_browser/ui/tables/inventory.py b/activity_browser/ui/tables/inventory.py index 4fa225c6b..19a1b9907 100644 --- a/activity_browser/ui/tables/inventory.py +++ b/activity_browser/ui/tables/inventory.py @@ -141,7 +141,7 @@ def 
contextMenuEvent(self, event) -> None: if self.indexAt(event.pos()).row() == -1 and len(self.model._dataframe) != 0: return - if self.selectedIndexes() > 1: + if len(self.selectedIndexes()) > 1: act = 'activities' else: act = 'activity' diff --git a/activity_browser/ui/widgets/__init__.py b/activity_browser/ui/widgets/__init__.py index 6b1e64895..bf8798009 100644 --- a/activity_browser/ui/widgets/__init__.py +++ b/activity_browser/ui/widgets/__init__.py @@ -9,7 +9,7 @@ DatabaseLinkingDialog, DefaultBiosphereDialog, DatabaseLinkingResultsDialog, ActivityLinkingDialog, ActivityLinkingResultsDialog, ProjectDeletionDialog, - ScenarioDatabaseDialog + ScenarioDatabaseDialog, LocationLinkingDialog ) from .line_edit import (SignalledPlainTextEdit, SignalledComboEdit, SignalledLineEdit) diff --git a/activity_browser/ui/widgets/dialog.py b/activity_browser/ui/widgets/dialog.py index 9b7c18e02..3d2a1835d 100644 --- a/activity_browser/ui/widgets/dialog.py +++ b/activity_browser/ui/widgets/dialog.py @@ -1145,3 +1145,72 @@ def construct_dialog(cls, parent: QtWidgets.QWidget = None, options: list = None obj.grid.addWidget(combo, i, 2, 1, 2) obj.updateGeometry() return obj + + +class LocationLinkingDialog(QtWidgets.QDialog): + """Display all of the possible location links in a single dialog for the user. + + Allow users to select alternate location links and an option to link to generic alternatives (GLO, RoW). + """ + def __init__(self, parent=None): + super().__init__(parent) + self.setWindowTitle("Activity Location linking") + + self.loc_label = QtWidgets.QLabel() + self.label_choices = [] + self.grid_box = QtWidgets.QGroupBox("Location link:") + self.grid = QtWidgets.QGridLayout() + self.grid_box.setLayout(self.grid) + + self.use_alternatives_checkbox = QtWidgets.QCheckBox('Use generic alternatives (GLO, RoW) as fallback') + self.use_alternatives_checkbox.setToolTip('If the location is not found, try to match to generic locations ' + 'like GLO and RoW.') + + self.buttons = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, + ) + self.buttons.accepted.connect(self.accept) + self.buttons.rejected.connect(self.reject) + + layout = QtWidgets.QVBoxLayout() + layout.addWidget(self.loc_label) + layout.addWidget(self.grid_box) + layout.addWidget(self.use_alternatives_checkbox) + layout.addWidget(self.buttons) + self.setLayout(layout) + + @property + def relink(self) -> dict: + """Returns a dictionary of str -> str key/values, showing which keys + should be linked to which values. + + Only returns key/value pairs if they differ. + """ + return { + label.text(): combo.currentText() for label, combo in self.label_choices + if label.text() != combo.currentText() + } + + @classmethod + def construct_dialog(cls, label: str, options: List[Tuple[str, List[str]]], + parent: QtWidgets.QWidget = None) -> 'LocationLinkingDialog': + obj = cls(parent) + obj.loc_label.setText(label) + # Start at 1 because row 0 is taken up by the loc_label + for i, item in enumerate(options): + label = QtWidgets.QLabel(item[0]) + combo = QtWidgets.QComboBox() + combo.addItems(item[1]) + combo.setCurrentText(item[0]) + obj.label_choices.append((label, combo)) + obj.grid.addWidget(label, i, 0, 1, 2) + obj.grid.addWidget(combo, i, 2, 1, 2) + + obj.updateGeometry() + return obj + + @classmethod + def relink_location(cls, options: List[Tuple[str, List[str]]], + parent=None) -> 'LocationLinkingDialog': + label = "Relinking exchange locations from activity to a new location." 
+ return cls.construct_dialog(label, options, parent) From 8ed34c8dc3082f68d7aa6f9f50ddfdcff7821f09 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Sat, 23 Sep 2023 23:08:14 +0200 Subject: [PATCH 13/72] Implement finding of suitable alternatives and proper management of failing that. --- activity_browser/controllers/activity.py | 81 ++++++++++++++++++------ activity_browser/ui/tables/inventory.py | 7 +- activity_browser/ui/widgets/dialog.py | 22 ++++--- 3 files changed, 78 insertions(+), 32 deletions(-) diff --git a/activity_browser/controllers/activity.py b/activity_browser/controllers/activity.py index 33b88ae79..86c4cb1b7 100644 --- a/activity_browser/controllers/activity.py +++ b/activity_browser/controllers/activity.py @@ -147,43 +147,86 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: This function will try and link all exchanges in the same location as the production process to a chosen location, if none is available for the given exchange, it will try to link to RoW and then GLO, if those don't exist, the exchange is not altered. + + This def does the following: + - Read all databases in exchanges of activity into MetaDataStore + - Give user dialog to re-link location and potentially use alternatives + - Finds suitable activities with new location (and potentially alternative) + - Re-link exchanges to new (and potentially alternative) location + - Show user what changes are.??? """ #TODO actually write def, this is just a copy of duplicate_activity above - #TODO write such that it takes only one activity as input - activities = self._retrieve_activities(data) + act = self._retrieve_activities(data)[0] # get list of dependent databases for activity and load to MetaDataStore databases = [] - - for act in activities: - for exch in act.technosphere(): - databases.append(exch['input'][0]) + for exch in act.technosphere(): + databases.append(exch['input'][0]) # load all dependent databases to MetaDataStore - dbs = [AB_metadata.get_database_metadata(db) for db in databases] + dbs = {db: AB_metadata.get_database_metadata(db) for db in databases} # get list of all unique locations in the dependent databases (sorted alphabetically) locations = [] - for db in dbs: + for db in dbs.values(): locations += db['location'].to_list() # add all locations to one list locations = list(set(locations)) # reduce the list to only unique items locations.sort() # get the location to relink - db = AB_metadata.get_database_metadata(act.key[0]) - old_location = db.loc[db['key'] == act.key]['location'][0] + db = dbs[act.key[0]] + old_location = db.loc[db['key'] == act.key]['location'].iloc[0] # trigger dialog with autocomplete-writeable-dropdown-list options = (old_location, locations) - dialog = LocationLinkingDialog.relink_location(options, self.window) - relinking_results = dict() - if dialog.exec_() == LocationLinkingDialog.Accepted: - for old, new in dialog.relink.items(): - relinking_results[old] = new - use_alternatives = dialog.use_alternatives_checkbox.isChecked() + dialog = LocationLinkingDialog.relink_location(act['name'], options, self.window) + if dialog.exec_() != LocationLinkingDialog.Accepted: + # if the dialog accept button is not clicked, do nothing + return + + # read the data from the dialog + for old, new in dialog.relink.items(): + new_location = new + use_alternatives = dialog.use_alternatives_checkbox.isChecked() + + succesful_links = {} # key: old key, value: new key + alternatives = ['RoW', 'GLO'] # alternatives to try to match to + # get exchanges to re-link + for exch in 
act.technosphere(): + db = dbs[exch['input'][0]] + if db.loc[db['key'] == exch['input']]['location'].iloc[0] != old_location: + continue # this exchange has a location we're not trying to re-link, continue + + # get relevant data to match on + row = db.loc[db['key'] == exch['input']] + name = row['name'].iloc[0] + prod = row['reference product'].iloc[0] + unit = row['unit'].iloc[0] + + # get candidates to match + candidates = db.loc[(db['name'] == name) + & (db['reference product'] == prod) + & (db['unit'] == unit)] + if len(candidates) <= 1: + continue # this activity does not exist in this database with another location (1 is self), continue + + # check candidates for new_location + candidate = candidates.loc[candidates['location'] == new_location] + if len(candidate) == 0 and not use_alternatives: + continue # there is no candidate, continue + elif len(candidate) > 1: + continue # there is more than one candidate, we can't know what to use, continue + elif len(candidate) == 0: + # there are no candidates, but we can try alternatives + for alt in alternatives: + candidate = candidates.loc[candidates['location'] == alt] + if len(candidate) != 0: + break # found an alternative in with this alternative location, stop looking + + # at this point, we have found 1 suitable candidate, whether that is new_location or alternative location + succesful_links[exch['input']] = candidate['key'].iloc[0] + + # now, create a new activity and do the actual re-linking - # check every exchange (act.technosphere) whether it can be replaced - # write a def that tries to find the processes and potential alternatives - # use MetaDataStore to quickly find things # for act in activities: # new_code = self.generate_copy_code(act.key) diff --git a/activity_browser/ui/tables/inventory.py b/activity_browser/ui/tables/inventory.py index 19a1b9907..60c8998d2 100644 --- a/activity_browser/ui/tables/inventory.py +++ b/activity_browser/ui/tables/inventory.py @@ -118,7 +118,7 @@ def __init__(self, parent=None): ) self.duplicate_activity_new_loc_action.setToolTip( "Duplicate this activity to another location.\n" - "Link the exchanges to a new location if it is availabe.") + "Link the exchanges to a new location if it is availabe.") # only for 1 activity self.delete_activity_action = QtWidgets.QAction( qicons.delete, "Delete activity/-ies", None ) @@ -143,8 +143,11 @@ def contextMenuEvent(self, event) -> None: if len(self.selectedIndexes()) > 1: act = 'activities' + self.duplicate_activity_new_loc_action.setEnabled(False) else: act = 'activity' + self.duplicate_activity_new_loc_action.setEnabled(True) + self.duplicate_activity_action.setText("Duplicate {}".format(act)) self.delete_activity_action.setText("Delete {}".format(act)) @@ -165,8 +168,6 @@ def contextMenuEvent(self, event) -> None: menu.addAction(self.new_activity_action) menu.addAction(self.duplicate_activity_action) menu.addAction(self.duplicate_activity_new_loc_action) - if len(self.selectedIndexes()) > 1: - self.duplicate_activity_new_loc_action.setEnabled(False) menu.addAction(self.delete_activity_action) menu.addAction( qicons.edit, "Relink the activity exchanges", diff --git a/activity_browser/ui/widgets/dialog.py b/activity_browser/ui/widgets/dialog.py index 3d2a1835d..a73848c64 100644 --- a/activity_browser/ui/widgets/dialog.py +++ b/activity_browser/ui/widgets/dialog.py @@ -1194,23 +1194,25 @@ def relink(self) -> dict: @classmethod def construct_dialog(cls, label: str, options: List[Tuple[str, List[str]]], parent: QtWidgets.QWidget = None) -> 
'LocationLinkingDialog': + loc, locs = options + obj = cls(parent) obj.loc_label.setText(label) + + label = QtWidgets.QLabel(loc) + combo = QtWidgets.QComboBox() + combo.addItems(locs) + combo.setCurrentText(loc) + obj.label_choices.append((label, combo)) # Start at 1 because row 0 is taken up by the loc_label - for i, item in enumerate(options): - label = QtWidgets.QLabel(item[0]) - combo = QtWidgets.QComboBox() - combo.addItems(item[1]) - combo.setCurrentText(item[0]) - obj.label_choices.append((label, combo)) - obj.grid.addWidget(label, i, 0, 1, 2) - obj.grid.addWidget(combo, i, 2, 1, 2) + obj.grid.addWidget(label, 0, 0, 1, 2) + obj.grid.addWidget(combo, 0, 2, 1, 2) obj.updateGeometry() return obj @classmethod - def relink_location(cls, options: List[Tuple[str, List[str]]], + def relink_location(cls, act_name: str, options: List[Tuple[str, List[str]]], parent=None) -> 'LocationLinkingDialog': - label = "Relinking exchange locations from activity to a new location." + label = "Relinking exchanges from activity '{}' to a new location.".format(act_name) return cls.construct_dialog(label, options, parent) From c666159a8235cac139834220a64407dbf0a8e122 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Mon, 25 Sep 2023 20:49:43 +0200 Subject: [PATCH 14/72] Implement actual relinking --- activity_browser/controllers/activity.py | 109 +++++++++++++++------- activity_browser/layouts/tabs/activity.py | 1 + activity_browser/signals.py | 1 + activity_browser/ui/widgets/activity.py | 2 +- activity_browser/ui/widgets/dialog.py | 4 +- 5 files changed, 81 insertions(+), 36 deletions(-) diff --git a/activity_browser/controllers/activity.py b/activity_browser/controllers/activity.py index 86c4cb1b7..943b9a9d7 100644 --- a/activity_browser/controllers/activity.py +++ b/activity_browser/controllers/activity.py @@ -155,13 +155,12 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: - Re-link exchanges to new (and potentially alternative) location - Show user what changes are.??? 
""" - #TODO actually write def, this is just a copy of duplicate_activity above - act = self._retrieve_activities(data)[0] + act = self._retrieve_activities(data)[0] # we only take one activity but this function always returns list # get list of dependent databases for activity and load to MetaDataStore databases = [] for exch in act.technosphere(): - databases.append(exch['input'][0]) + databases.append(exch.input[0]) # load all dependent databases to MetaDataStore dbs = {db: AB_metadata.get_database_metadata(db) for db in databases} @@ -188,16 +187,18 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: new_location = new use_alternatives = dialog.use_alternatives_checkbox.isChecked() - succesful_links = {} # key: old key, value: new key + keep_exch = [] # keep these exchanges + succesful_links = {} # dict of dicts, key of new exch : {new values} <-- see 'values' below alternatives = ['RoW', 'GLO'] # alternatives to try to match to # get exchanges to re-link for exch in act.technosphere(): - db = dbs[exch['input'][0]] - if db.loc[db['key'] == exch['input']]['location'].iloc[0] != old_location: + db = dbs[exch.input[0]] + if db.loc[db['key'] == exch.input]['location'].iloc[0] != old_location: + keep_exch.append(exch.input) continue # this exchange has a location we're not trying to re-link, continue # get relevant data to match on - row = db.loc[db['key'] == exch['input']] + row = db.loc[db['key'] == exch.input] name = row['name'].iloc[0] prod = row['reference product'].iloc[0] unit = row['unit'].iloc[0] @@ -207,47 +208,68 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: & (db['reference product'] == prod) & (db['unit'] == unit)] if len(candidates) <= 1: + keep_exch.append(exch.input) continue # this activity does not exist in this database with another location (1 is self), continue # check candidates for new_location candidate = candidates.loc[candidates['location'] == new_location] if len(candidate) == 0 and not use_alternatives: + keep_exch.append(exch.input) continue # there is no candidate, continue elif len(candidate) > 1: + keep_exch.append(exch.input) continue # there is more than one candidate, we can't know what to use, continue elif len(candidate) == 0: # there are no candidates, but we can try alternatives + no_alt = True for alt in alternatives: candidate = candidates.loc[candidates['location'] == alt] if len(candidate) != 0: + no_alt = False break # found an alternative in with this alternative location, stop looking + if no_alt: + # no alternative found, despite alternatives + keep_exch.append(exch.input) # at this point, we have found 1 suitable candidate, whether that is new_location or alternative location - succesful_links[exch['input']] = candidate['key'].iloc[0] - - # now, create a new activity and do the actual re-linking - - - # for act in activities: - # new_code = self.generate_copy_code(act.key) - # new_act = act.copy(new_code) - # # Update production exchanges - # for exc in new_act.production(): - # if exc.input.key == act.key: - # exc.input = new_act - # exc.save() - # # Update 'products' - # for product in new_act.get('products', []): - # if product.get('input') == act.key: - # product['input'] = new_act.key - # new_act.save() - # AB_metadata.update_metadata(new_act.key) - # signals.safe_open_activity_tab.emit(new_act.key) - # - # db = next(iter(activities)).get("database") - # bw.databases.set_modified(db) - # signals.database_changed.emit(db) - # signals.databases_changed.emit() + values = { + 'amount': exch.get('amount', False), + 
'comment': exch.get('comment', False), + 'formula': exch.get('formula', False), + 'uncertainty': exch.get('uncertainty', False) + } + succesful_links[candidate['key'].iloc[0]] = values + + # now, create a new activity by copying the old one + db_name = act.key[0] + new_code = self.generate_copy_code(act.key) + new_act = act.copy(new_code) + # update production exchanges + for exc in new_act.production(): + if exc.input.key == act.key: + exc.input = new_act + exc.save() + # update 'products' + for product in new_act.get('products', []): + if product.get('input') == act.key: + product.input = new_act.key + new_act.save() + # save the new location to the activity + self.modify_activity(new_act.key, 'location', new_location) + # delete old exchanges + delete_exch = [exch for exch in new_act.technosphere() if exch.input not in keep_exch] + signals.exchanges_deleted.emit(delete_exch) + # add the new exchanges with all values carried over from last exch + signals.exchanges_add_w_values.emit(list(succesful_links.keys()), new_act.key, succesful_links) + + # update the MetaDataStore and open new activity + AB_metadata.update_metadata(new_act.key) + signals.safe_open_activity_tab.emit(new_act.key) + + # send signals to relevant locations + bw.databases.set_modified(db_name) + signals.database_changed.emit(db_name) + signals.databases_changed.emit() @Slot(tuple, str, name="copyActivityToDbInterface") @Slot(list, str, name="copyActivitiesToDbInterface") @@ -348,13 +370,29 @@ def __init__(self, parent=None): signals.exchanges_deleted.connect(self.delete_exchanges) signals.exchanges_add.connect(self.add_exchanges) + signals.exchanges_add_w_values.connect(self.add_exchanges) signals.exchange_modified.connect(self.modify_exchange) signals.exchange_uncertainty_wizard.connect(self.edit_exchange_uncertainty) signals.exchange_uncertainty_modified.connect(self.modify_exchange_uncertainty) signals.exchange_pedigree_modified.connect(self.modify_exchange_pedigree) @Slot(list, tuple, name="addExchangesToKey") - def add_exchanges(self, from_keys: Iterator[tuple], to_key: tuple) -> None: + def add_exchanges(self, from_keys: Iterator[tuple], to_key: tuple, new_values: dict = {}) -> None: + """ + Add new exchanges. + + Optionally add new values also. 
+ + Parameters + ---------- + from_keys: The activities (keys) to create exchanges from + to_key: The activity (key) to create an exchange to + new_values: Values of the exchange, dict (from_keys as keys) with field names and values for the exchange + + Returns + ------- + + """ activity = bw.get_activity(to_key) for key in from_keys: technosphere_db = bc.is_technosphere_db(key[0]) @@ -365,6 +403,11 @@ def add_exchanges(self, from_keys: Iterator[tuple], to_key: tuple) -> None: exc['type'] = 'biosphere' else: exc['type'] = 'unknown' + # add optional exchange values + if new_vals := new_values.get(key, {}): + for field_name, value in new_vals.items(): + if value: + exc[field_name] = value exc.save() bw.databases.set_modified(to_key[0]) AB_metadata.update_metadata(to_key) diff --git a/activity_browser/layouts/tabs/activity.py b/activity_browser/layouts/tabs/activity.py index 40875b3f6..76ec878e1 100644 --- a/activity_browser/layouts/tabs/activity.py +++ b/activity_browser/layouts/tabs/activity.py @@ -157,6 +157,7 @@ def __init__(self, key: tuple, parent=None, read_only=True): self.grouped_tables = [DetailsGroupBox(l, t) for l, t in self.exchange_tables] # activity-specific data displayed and editable near the top of the tab + # this contains: activity name, location, database self.activity_data_grid = ActivityDataGrid(read_only=self.read_only, parent=self) self.db_read_only_changed(db_name=self.db_name, db_read_only=self.db_read_only) diff --git a/activity_browser/signals.py b/activity_browser/signals.py index 5eb4a15ab..a949a950f 100644 --- a/activity_browser/signals.py +++ b/activity_browser/signals.py @@ -65,6 +65,7 @@ class Signals(QObject): # Exchanges exchanges_deleted = Signal(list) # These exchanges should be deleted | list of exchange keys exchanges_add = Signal(list, tuple) # Add these exchanges to this activity | list of exchange keys to be added, key of target activity + exchanges_add_w_values = Signal(list, tuple, dict) # Add these exchanges to this activity with these values| list of exchange keys to be added, key of target activity, values to add per exchange exchange_modified = Signal(object, str, object) # This was changed about this exchange | exchange object, name of the changed field, new content of the field # Exchange object and uncertainty dictionary exchange_uncertainty_wizard = Signal(object) # Trigger uncertainty dialog for this exchange | exchange object diff --git a/activity_browser/ui/widgets/activity.py b/activity_browser/ui/widgets/activity.py index c88fcc3e1..75fdd1367 100644 --- a/activity_browser/ui/widgets/activity.py +++ b/activity_browser/ui/widgets/activity.py @@ -160,7 +160,7 @@ def duplicate_confirm_dialog(self, target_db): """ Get user confirmation for duplication action """ title = "Duplicate activity to new database" text = "Copy {} to {} and open as new tab?".format( - self.parent.activity.get('name', 'Error: Name of Act not found'), target_db) + self.parent.activity.get('name', 'Error: Name of activity not found'), target_db) user_choice = QMessageBox.question(self, title, text, QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if user_choice == QMessageBox.Yes: diff --git a/activity_browser/ui/widgets/dialog.py b/activity_browser/ui/widgets/dialog.py index a73848c64..0cb1565da 100644 --- a/activity_browser/ui/widgets/dialog.py +++ b/activity_browser/ui/widgets/dialog.py @@ -1162,9 +1162,9 @@ def __init__(self, parent=None): self.grid = QtWidgets.QGridLayout() self.grid_box.setLayout(self.grid) - self.use_alternatives_checkbox = 
QtWidgets.QCheckBox('Use generic alternatives (GLO, RoW) as fallback') + self.use_alternatives_checkbox = QtWidgets.QCheckBox('Use generic alternatives (RoW, GLO) as fallback') self.use_alternatives_checkbox.setToolTip('If the location is not found, try to match to generic locations ' - 'like GLO and RoW.') + 'RoW or GLO (in that order).') self.buttons = QtWidgets.QDialogButtonBox( QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, From 5e97cfaa5fa5ccbc5b1bdb4d106ee1c0e4b9216c Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Tue, 26 Sep 2023 11:32:01 +0200 Subject: [PATCH 15/72] Minor documentation improvements + store exchanges to remove instead of to keep. --- activity_browser/controllers/activity.py | 34 +++++++++++++----------- activity_browser/signals.py | 2 +- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/activity_browser/controllers/activity.py b/activity_browser/controllers/activity.py index 943b9a9d7..abb2bd036 100644 --- a/activity_browser/controllers/activity.py +++ b/activity_browser/controllers/activity.py @@ -141,7 +141,7 @@ def duplicate_activity(self, data: Union[tuple, Iterator[tuple]]) -> None: signals.databases_changed.emit() @Slot(tuple, name="copyActivityNewLoc") - def duplicate_activity_new_loc(self, data: tuple) -> None: + def duplicate_activity_new_loc(self, old_key: tuple) -> None: """Duplicates the selected activity in the same db, links to new location, with a new BW code. This function will try and link all exchanges in the same location as the production process @@ -153,9 +153,15 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: - Give user dialog to re-link location and potentially use alternatives - Finds suitable activities with new location (and potentially alternative) - Re-link exchanges to new (and potentially alternative) location - - Show user what changes are.??? + + Parameters + ---------- + old_key: the key of the activity to re-link to a different location + + Returns + ------- """ - act = self._retrieve_activities(data)[0] # we only take one activity but this function always returns list + act = self._retrieve_activities(old_key)[0] # we only take one activity but this function always returns list # get list of dependent databases for activity and load to MetaDataStore databases = [] @@ -187,14 +193,18 @@ def duplicate_activity_new_loc(self, data: tuple) -> None: new_location = new use_alternatives = dialog.use_alternatives_checkbox.isChecked() - keep_exch = [] # keep these exchanges + del_exch = [] # delete these exchanges succesful_links = {} # dict of dicts, key of new exch : {new values} <-- see 'values' below alternatives = ['RoW', 'GLO'] # alternatives to try to match to + # in the future, 'alternatives' could be improved by making use of some location hierarchy. From that we could + # get things like if the new location is NL but there is no NL, but RER exists, we use that. However, for that + # we need some hierarchical structure to the location data, which may be available from ecoinvent, but we need + # to look for that. 
+        # get exchanges to re-link
         for exch in act.technosphere():
             db = dbs[exch.input[0]]
             if db.loc[db['key'] == exch.input]['location'].iloc[0] != old_location:
-                keep_exch.append(exch.input)
                 continue  # this exchange has a location we're not trying to re-link, continue

             # get relevant data to match on
@@ -203,35 +213,28 @@ def duplicate_activity_new_loc(self, data: tuple) -> None:
             prod = row['reference product'].iloc[0]
             unit = row['unit'].iloc[0]

-            # get candidates to match
+            # get candidates to match (must have same name, product and unit)
             candidates = db.loc[(db['name'] == name) & (db['reference product'] == prod) & (db['unit'] == unit)]
             if len(candidates) <= 1:
-                keep_exch.append(exch.input)
                 continue  # this activity does not exist in this database with another location (1 is self), continue

             # check candidates for new_location
             candidate = candidates.loc[candidates['location'] == new_location]
             if len(candidate) == 0 and not use_alternatives:
-                keep_exch.append(exch.input)
                 continue  # there is no candidate, continue
             elif len(candidate) > 1:
-                keep_exch.append(exch.input)
                 continue  # there is more than one candidate, we can't know what to use, continue
             elif len(candidate) == 0:
                 # there are no candidates, but we can try alternatives
-                no_alt = True
                 for alt in alternatives:
                     candidate = candidates.loc[candidates['location'] == alt]
                     if len(candidate) != 0:
-                        no_alt = False
                         break  # found a candidate with this alternative location, stop looking
-                if no_alt:
-                    # no alternative found, despite alternatives
-                    keep_exch.append(exch.input)
+                else:
+                    continue  # for-else: no alternative location matched either, so keep this exchange as-is

             # at this point, we have found 1 suitable candidate, whether that is new_location or alternative location
+            del_exch.append(exch)
             values = {
                 'amount': exch.get('amount', False),
                 'comment': exch.get('comment', False),
@@ -257,8 +260,7 @@ def duplicate_activity_new_loc(self, data: tuple) -> None:
         # save the new location to the activity
         self.modify_activity(new_act.key, 'location', new_location)
         # delete old exchanges
-        delete_exch = [exch for exch in new_act.technosphere() if exch.input not in keep_exch]
-        signals.exchanges_deleted.emit(delete_exch)
+        signals.exchanges_deleted.emit(del_exch)
         # add the new exchanges with all values carried over from last exch
         signals.exchanges_add_w_values.emit(list(succesful_links.keys()), new_act.key, succesful_links)

diff --git a/activity_browser/signals.py b/activity_browser/signals.py
index a949a950f..db0454e75 100644
--- a/activity_browser/signals.py
+++ b/activity_browser/signals.py
@@ -44,7 +44,7 @@ class Signals(QObject):
     new_activity = Signal(str)  # Trigger dialog to create a new activity in this database | name of database
     add_activity_to_history = Signal(tuple)  # Add this activity to history | key of activity
     duplicate_activity = Signal(tuple)  # Duplicate this activity | key of activity
-    duplicate_activity_new_loc = Signal(tuple)  # Duplicate this activity to a new location | key of activity
+    duplicate_activity_new_loc = Signal(tuple)  # Trigger dialog to duplicate this activity to a new location | key of activity
     duplicate_activities = Signal(list)  # Duplicate these activities | list of activity keys
     duplicate_activity_to_db = Signal(str, object)  # Duplicate this activity to another database | name of target database, BW2 activity object
     #TODO write below 2 signals to work without the str, source database is already stored in activity keys

From c3972b23a9859c763b3ce5e367c0a834d7ded264 Mon Sep 17 00:00:00 2001
From: marc-vdm
Date: Fri, 22 Sep 2023 10:49:40 +0200
Subject: [PATCH 16/72] Minor documentation + code improvements
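
Note on the `OrderedDict` change below: `cont_per` is built with the 'Total'
and 'Rest' entries inserted first, so those two rows always stay ahead of the
sorted top contributions. A minimal sketch of that ordering (hypothetical keys
and values, not taken from any real result):

```python
from collections import OrderedDict

# 'Total' and 'Rest' are inserted first and therefore stay at the head of
# the mapping; top contributors follow in the order they are appended.
cont_per = OrderedDict()
cont_per[('Total', '')] = 10.0   # hypothetical column total
cont_per[('Rest', '')] = 2.5     # total minus the top contributions
cont_per[('flow A', '')] = 4.5   # hypothetical top contributor
cont_per[('flow B', '')] = 3.0

print(list(cont_per))
# [('Total', ''), ('Rest', ''), ('flow A', ''), ('flow B', '')]
```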
--- activity_browser/bwutils/multilca.py | 5 +++-- activity_browser/layouts/tabs/LCA_results_tab.py | 5 +---- activity_browser/ui/tables/models/lca_results.py | 3 ++- activity_browser/ui/web/sankey_navigator.py | 3 +-- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/activity_browser/bwutils/multilca.py b/activity_browser/bwutils/multilca.py index 599687653..ff51343e4 100644 --- a/activity_browser/bwutils/multilca.py +++ b/activity_browser/bwutils/multilca.py @@ -8,6 +8,8 @@ ca = ContributionAnalysis() +from collections import OrderedDict + from .commontasks import wrap_text from .metadata import AB_metadata from .errors import ReferenceFlowValueError @@ -391,7 +393,7 @@ def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): topcontribution_dict = dict() for fu_or_method, col in FU_M_index.items(): top_contribution = ca.sort_array(C[col, :], limit=limit, limit_type=limit_type) - cont_per = dict() + cont_per = OrderedDict() cont_per.update({ ('Total', ''): C[col, :].sum(), ('Rest', ''): C[col, :].sum() - top_contribution[:, 0].sum(), @@ -401,7 +403,6 @@ def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): topcontribution_dict.update({fu_or_method: cont_per}) return topcontribution_dict - @staticmethod def get_labels(key_list, fields=None, separator=' | ', max_length=False, mask=None): diff --git a/activity_browser/layouts/tabs/LCA_results_tab.py b/activity_browser/layouts/tabs/LCA_results_tab.py index 065cbb8e0..7a484eb8a 100644 --- a/activity_browser/layouts/tabs/LCA_results_tab.py +++ b/activity_browser/layouts/tabs/LCA_results_tab.py @@ -23,7 +23,6 @@ def __init__(self, parent): super(LCAResultsTab, self).__init__(parent) self.setMovable(True) - # self.setTabShape(1) # Triangular-shaped Tabs self.setTabsClosable(True) # Generate layout @@ -68,6 +67,7 @@ def generate_setup(self, data: dict): except (BW2CalcError, ABError) as e: initial, *other = e.args log.error(traceback.format_exc()) + QApplication.restoreOverrideCursor() msg = QMessageBox( QMessageBox.Warning, "Calculation problem", str(initial), QMessageBox.Ok, self @@ -76,6 +76,3 @@ def generate_setup(self, data: dict): if other: msg.setDetailedText("\n".join(other)) msg.exec_() - except ABError as e: - QApplication.restoreOverrideCursor() - diff --git a/activity_browser/ui/tables/models/lca_results.py b/activity_browser/ui/tables/models/lca_results.py index 8000b4e4f..0f35e74df 100644 --- a/activity_browser/ui/tables/models/lca_results.py +++ b/activity_browser/ui/tables/models/lca_results.py @@ -23,6 +23,7 @@ def sync(self, df): class ContributionModel(PandasModel): def sync(self, df): self._dataframe = df.replace(np.nan, '', regex=True) + # drop the 'rest' row if empty if self._dataframe.select_dtypes(include=np.number).iloc[1, :].sum() == 0: - self._dataframe.drop(1, inplace=True) + self._dataframe.drop(labels=1, inplace=True) self.updated.emit() diff --git a/activity_browser/ui/web/sankey_navigator.py b/activity_browser/ui/web/sankey_navigator.py index ca5c6f5d3..1d2f73b94 100644 --- a/activity_browser/ui/web/sankey_navigator.py +++ b/activity_browser/ui/web/sankey_navigator.py @@ -244,13 +244,12 @@ def update_sankey(self, demand, method, scenario_index: int = None, method_index else: data = bw.GraphTraversal().calculate(demand, method, cutoff=cut_off, max_calc=max_calc) - except ValueError as e: + except (ValueError, ZeroDivisionError) as e: QtWidgets.QMessageBox.information(None, "Not possible.", str(e)) log.info("Completed graph traversal ({:.2g} seconds, {} 
iterations)".format(time.time() - start, data["counter"])) self.graph.new_graph(data) self.has_sankey = bool(self.graph.json_data) - # print("emitting graph ready signal") self.send_json() def set_database(self, name): From 21b8d8d4a6a0f731a101fbe81ac1fd3038745672 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Thu, 28 Sep 2023 21:15:18 +0200 Subject: [PATCH 17/72] Resolve #782 + improve documentation of `Contributions` class --- activity_browser/bwutils/multilca.py | 229 ++++++++++----------------- 1 file changed, 86 insertions(+), 143 deletions(-) diff --git a/activity_browser/bwutils/multilca.py b/activity_browser/bwutils/multilca.py index ff51343e4..44021556f 100644 --- a/activity_browser/bwutils/multilca.py +++ b/activity_browser/bwutils/multilca.py @@ -348,55 +348,45 @@ def __init__(self, mlca): "technosphere": (self.mlca.rev_activity_dict, self.mlca.lca.activity_dict, self.act_fields), } - def normalize(self, contribution_array): + def normalize(self, contribution_array: np.ndarray) -> np.ndarray: """Normalise the contribution array. Parameters ---------- - contribution_array : `numpy.ndarray` - A 2-dimensional contribution array + contribution_array : A 2-dimensional contribution array Returns ------- - `numpy.ndarray` - 2-dimensional array of same shape, with scores normalized. + 2-dimensional array of same shape, with scores normalized. """ - scores = abs(contribution_array).sum(axis=1, keepdims=True) + scores = abs(contribution_array.sum(axis=1, keepdims=True)) return contribution_array / scores - def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): + def _build_dict(self, contributions: np.ndarray, FU_M_index: dict, + rev_dict: dict, limit: int, limit_type: str) -> dict: """Sort the given contribution array on method or reference flow column. 
Parameters ---------- - C : `numpy.ndarray` - A 2-dimensional contribution array - FU_M_index : dict - Dictionary which maps the reference flows or methods to their - matching columns - rev_dict : dict - 'reverse' dictionary used to map correct activity/method to - its value - limit : int - Number of top-contributing items to include - limit_type : str - Either "number" or "percent", ContributionAnalysis.sort_array - for complete explanation + contributions: A 2-dimensional contribution array + FU_M_index : Dictionary which maps the reference flows or methods to their matching columns + rev_dict : 'reverse' dictionary used to map correct activity/method to its value + limit : Number of top-contributing items to include + limit_type : Either "number" or "percent", ContributionAnalysis.sort_array for complete explanation Returns ------- - dict - Top-contributing flows per method or activity + Top-contributing flows per method or activity """ topcontribution_dict = dict() for fu_or_method, col in FU_M_index.items(): - top_contribution = ca.sort_array(C[col, :], limit=limit, limit_type=limit_type) + top_contribution = ca.sort_array(contributions[col, :], limit=limit, limit_type=limit_type) cont_per = OrderedDict() cont_per.update({ - ('Total', ''): C[col, :].sum(), - ('Rest', ''): C[col, :].sum() - top_contribution[:, 0].sum(), + ('Total', ''): contributions[col, :].sum(), + ('Rest', ''): contributions[col, :].sum() - top_contribution[:, 0].sum(), }) for value, index in top_contribution: cont_per.update({rev_dict[index]: value}) @@ -404,8 +394,8 @@ def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): return topcontribution_dict @staticmethod - def get_labels(key_list, fields=None, separator=' | ', - max_length=False, mask=None): + def get_labels(key_list: pd.MultiIndex, fields: Optional[list] = None, separator: str = ' | ', + max_length: int = False, mask: Optional[list] = None) -> list: """Generate labels from metadata information. Setting max_length will wrap the label into a multi-line string if @@ -413,23 +403,16 @@ def get_labels(key_list, fields=None, separator=' | ', Parameters ---------- - key_list : `pandas.MultiIndex` - An index containing 'keys' to be retrieved from the MetaDataStore - fields : list - List of column-names to be included from the MetaDataStore - separator : str - Specific separator to use when joining strings together - max_length : int - Allowed character length before string is wrapped over multiple - lines - mask : list - Instead of the metadata, this list is used to check keys against. + key_list : An index containing 'keys' to be retrieved from the MetaDataStore + fields : List of column-names to be included from the MetaDataStore + separator : Specific separator to use when joining strings together + max_length : Allowed character length before string is wrapped over multiple lines + mask : Instead of the metadata, this list is used to check keys against. 
Use if data is aggregated or keys do not exist in MetaDataStore Returns ------- - list - Translated and/or joined (and wrapped) labels matching the keys + Translated and/or joined (and wrapped) labels matching the keys """ fields = fields if fields else ['name', 'reference product', 'location', 'database'] @@ -449,8 +432,8 @@ def get_labels(key_list, fields=None, separator=' | ', return translated_keys @classmethod - def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, - special_keys=None): + def join_df_with_metadata(cls, df: pd.DataFrame, x_fields: Optional[list] = None, y_fields: Optional[list] = None, + special_keys: Optional[list] = None) -> pd.DataFrame: """Join a dataframe that has keys on the index with metadata. Metadata fields are defined in x_fields. @@ -459,24 +442,19 @@ def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, Parameters ---------- - df : `pandas.DataFrame` - Simple DataFrame containing processed data - x_fields : list - List of additional columns to add from the MetaDataStore - y_fields : list - List of column keys for the data in the df dataframe - special_keys : list - List of specific items to place at the top of the dataframe + df : Simple DataFrame containing processed data + x_fields : List of additional columns to add from the MetaDataStore + y_fields : List of column keys for the data in the df dataframe + special_keys : List of specific items to place at the top of the dataframe Returns ------- - `pandas.DataFrame` - Expanded and metadata-annotated dataframe + Expanded and metadata-annotated dataframe """ # replace column keys with labels - df.columns = cls.get_labels(df.columns, fields=y_fields)#, separator='\n') + df.columns = cls.get_labels(df.columns, fields=y_fields) # Coerce index to MultiIndex if it currently isn't if not isinstance(df.index, pd.MultiIndex): df.index = pd.MultiIndex.from_tuples(df.index) @@ -498,27 +476,20 @@ def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, joined.index = cls.get_labels(joined.index, fields=x_fields) return joined - def get_labelled_contribution_dict(self, cont_dict, x_fields=None, - y_fields=None, mask=None): + def get_labelled_contribution_dict(self, cont_dict: dict, x_fields: list = None, + y_fields: list = None, mask: list = None) -> pd.DataFrame: """Annotate the contribution dict with metadata. 
Parameters ---------- - cont_dict : dict - Holds the contribution data connected to the functions of methods - x_fields : list - X-axis fieldnames, these are usually the indexes/keys of specific - processes - y_fields : list - Column names specific to the cont_dict to be labelled - mask : list - Used in case of aggregation or special cases where the usual - way of using the metadata cannot be used + cont_dict : Holds the contribution data connected to the functions of methods + x_fields : X-axis fieldnames, these are usually the indexes/keys of specific processes + y_fields : Column names specific to the cont_dict to be labelled + mask : Used in case of aggregation or special cases where the usual way of using the metadata cannot be used Returns ------- - `pandas.DataFrame` - Annotated contribution dict inside a pandas dataframe + Annotated contribution dict inside a pandas dataframe """ dfs = ( @@ -536,7 +507,6 @@ def get_labelled_contribution_dict(self, cont_dict, x_fields=None, index = df.loc[df.index.difference(special_keys)].replace(0, np.nan).dropna(how='all').index.union(special_keys) df = df.loc[index] - joined = None if not mask: joined = self.join_df_with_metadata( df, x_fields=x_fields, y_fields=y_fields, @@ -553,13 +523,10 @@ def get_labelled_contribution_dict(self, cont_dict, x_fields=None, joined = df if joined is not None: return joined.reset_index(drop=False) - return @staticmethod def adjust_table_unit(df: pd.DataFrame, method: Optional[tuple]) -> pd.DataFrame: - """Given a dataframe, adjust the unit of the table to either match the - given method, or not exist. - """ + """Given a dataframe, adjust the unit of the table to either match the given method, or not exist.""" if "unit" not in df.columns: return df keys = df.index[~df["index"].isin({"Total", "Rest"})] @@ -578,9 +545,8 @@ def _build_inventory(inventory: dict, indices: dict, columns: list, joined.reset_index(inplace=True, drop=True) return joined - def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}): - """Returns an inventory dataframe with metadata of the given type. - """ + def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}) -> pd.DataFrame: + """Return an inventory dataframe with metadata of the given type.""" try: data = self.inventory_data[inventory_type] appending = columns.difference(set(data[3])) @@ -613,9 +579,8 @@ def _build_lca_scores_df(self, scores: np.ndarray) -> pd.DataFrame: joined = joined.loc[:, col_order.append(methods)] return joined.reset_index(drop=False) - def lca_scores_df(self, normalized=False) -> pd.DataFrame: - """Returns a metadata-annotated DataFrame of the LCA scores. - """ + def lca_scores_df(self, normalized: bool = False) -> pd.DataFrame: + """Return a metadata-annotated DataFrame of the LCA scores.""" scores = self.mlca.lca_scores if not normalized else self.mlca.lca_scores_normalized return self._build_lca_scores_df(scores) @@ -625,8 +590,7 @@ def _build_contributions(data: np.ndarray, index: int, axis: int) -> np.ndarray: def get_contributions(self, contribution, functional_unit=None, method=None) -> np.ndarray: - """Return a contribution matrix given the type and fu / method - """ + """Return a contribution matrix given the type and fu / method.""" if all([functional_unit, method]) or not any([functional_unit, method]): raise ValueError( "It must be either by reference flow or by impact category. 
Provided:" @@ -645,40 +609,32 @@ def get_contributions(self, contribution, functional_unit=None, dataset[contribution], self.mlca.func_key_dict[functional_unit], 0 ) - def aggregate_by_parameters(self, C: np.ndarray, inventory: str, + def aggregate_by_parameters(self, contributions: np.ndarray, inventory: str, parameters: Union[str, list] = None): - """Perform aggregation of the contribution data given parameters + """Perform aggregation of the contribution data given parameters. Parameters ---------- - C : `numpy.ndarray` - 2-dimensional contribution array - inventory: str - Either 'biosphere' or 'technosphere', used to determine which - inventory to use - parameters : str or list - One or more parameters by which to aggregate the given contribution - array. + contributions : 2-dimensional contribution array + inventory : Either 'biosphere' or 'technosphere', used to determine which inventory to use + parameters : One or more parameters by which to aggregate the given contribution array. Returns ------- - `numpy.ndarray` + aggregated : pd.DataFrame The aggregated 2-dimensional contribution array mask_index : dict - Contains all of the values of the aggregation mask, linked to - their indexes + Contains all of the values of the aggregation mask, linked to their indexes mask : list or dictview or None An optional list or dictview of the mask_index values - ------- - """ rev_index, keys, fields = self.aggregate_data[inventory] if not parameters: - return C, rev_index, None + return contributions, rev_index, None - df = pd.DataFrame(C).T - columns = list(range(C.shape[0])) + df = pd.DataFrame(contributions).T + columns = list(range(contributions.shape[0])) df.index = pd.MultiIndex.from_tuples(rev_index.values()) metadata = AB_metadata.get_metadata(list(keys), fields) @@ -695,7 +651,7 @@ def _contribution_rows(self, contribution: str, aggregator=None): return self.act_fields if contribution == self.ACT else self.ef_fields return aggregator if isinstance(aggregator, list) else [aggregator] - def _correct_method_index(self, mthd_indx): + def _correct_method_index(self, mthd_indx: list) -> dict: """ A method for amending the tuples for impact method labels so that all tuples are fully printed. @@ -720,9 +676,10 @@ def _contribution_index_cols(self, **kwargs) -> (dict, Optional[Iterable]): return self.mlca.fu_index, self.act_fields return self._correct_method_index(self.mlca.methods), None - def top_elementary_flow_contributions(self, functional_unit=None, method=None, - aggregator=None, limit=5, normalize=False, - limit_type="number", **kwargs): + def top_elementary_flow_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, + aggregator: Optional[str, list] = None, limit: int = 5, + normalize: bool = False, limit_type: str = "number", **kwargs + ) -> pd.DataFrame: """Return top EF contributions for either functional_unit or method. 
* If functional_unit: Compare the unit against all considered impact @@ -731,49 +688,42 @@ def top_elementary_flow_contributions(self, functional_unit=None, method=None, Parameters ---------- - functional_unit : tuple, optional - The reference flow to compare all considered impact categories against - method : tuple, optional - The method to compare all considered reference flows against - aggregator : str or list, optional - Used to aggregate EF contributions over certain columns - limit : int - The number of top contributions to consider - normalize : bool - Determines whether or not to normalize the contribution values - limit_type : str - The type of limit, either 'number' or 'percent' - + functional_unit : The reference flow to compare all considered impact categories against + method : The method to compare all considered reference flows against + aggregator : Used to aggregate EF contributions over certain columns + limit : The number of top contributions to consider + normalize : Determines whether or not to normalize the contribution values + limit_type : The type of limit, either 'number' or 'percent' Returns ------- - `pandas.DataFrame` - Annotated top-contribution dataframe + Annotated top-contribution dataframe """ - C = self.get_contributions(self.EF, functional_unit, method) + contributions = self.get_contributions(self.EF, functional_unit, method) x_fields = self._contribution_rows(self.EF, aggregator) index, y_fields = self._contribution_index_cols( functional_unit=functional_unit, method=method ) - C, rev_index, mask = self.aggregate_by_parameters(C, self.BIOS, aggregator) + contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.BIOS, aggregator) # Normalise if required if normalize: - C = self.normalize(C) + contributions = self.normalize(contributions) - top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type) + top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type) labelled_df = self.get_labelled_contribution_dict( top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask ) self.adjust_table_unit(labelled_df, method) return labelled_df - def top_process_contributions(self, functional_unit=None, method=None, - aggregator=None, limit=5, normalize=False, - limit_type="number", **kwargs): - """Return top process contributions for functional_unit or method + def top_process_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, + aggregator: Optional[str, list] = None, limit: int = 5, + normalize: bool = False, limit_type: str = "number", **kwargs + ) -> pd.DataFrame: + """Return top process contributions for functional_unit or method. * If functional_unit: Compare the process against all considered impact assessment methods. 
@@ -781,38 +731,31 @@ def top_process_contributions(self, functional_unit=None, method=None, Parameters ---------- - functional_unit : tuple, optional - The reference flow to compare all considered methods against - method : tuple, optional - The method to compare all considered reference flows against - aggregator : str or list, optional - Used to aggregate PC contributions over certain columns - limit : int - The number of top contributions to consider - normalize : bool - Determines whether or not to normalize the contribution values - limit_type : str - The type of limit, either 'number' or 'percent' + functional_unit : The reference flow to compare all considered impact categories against + method : The method to compare all considered reference flows against + aggregator : Used to aggregate EF contributions over certain columns + limit : The number of top contributions to consider + normalize : Determines whether or not to normalize the contribution values + limit_type : The type of limit, either 'number' or 'percent' Returns ------- - `pandas.DataFrame` - Annotated top-contribution dataframe + Annotated top-contribution dataframe """ - C = self.get_contributions(self.ACT, functional_unit, method) + contributions = self.get_contributions(self.ACT, functional_unit, method) x_fields = self._contribution_rows(self.ACT, aggregator) index, y_fields = self._contribution_index_cols( functional_unit=functional_unit, method=method ) - C, rev_index, mask = self.aggregate_by_parameters(C, self.TECH, aggregator) + contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.TECH, aggregator) # Normalise if required if normalize: - C = self.normalize(C) + contributions = self.normalize(contributions) - top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type) + top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type) labelled_df = self.get_labelled_contribution_dict( top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask ) From 3ad0ba8bab7f599c50bb923f6bc37682a76f2f74 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Fri, 22 Sep 2023 10:49:40 +0200 Subject: [PATCH 18/72] Minor documentation + code improvements --- activity_browser/bwutils/multilca.py | 229 +++++++++++++++++---------- 1 file changed, 143 insertions(+), 86 deletions(-) diff --git a/activity_browser/bwutils/multilca.py b/activity_browser/bwutils/multilca.py index 44021556f..ff51343e4 100644 --- a/activity_browser/bwutils/multilca.py +++ b/activity_browser/bwutils/multilca.py @@ -348,45 +348,55 @@ def __init__(self, mlca): "technosphere": (self.mlca.rev_activity_dict, self.mlca.lca.activity_dict, self.act_fields), } - def normalize(self, contribution_array: np.ndarray) -> np.ndarray: + def normalize(self, contribution_array): """Normalise the contribution array. Parameters ---------- - contribution_array : A 2-dimensional contribution array + contribution_array : `numpy.ndarray` + A 2-dimensional contribution array Returns ------- - 2-dimensional array of same shape, with scores normalized. + `numpy.ndarray` + 2-dimensional array of same shape, with scores normalized. 
""" - scores = abs(contribution_array.sum(axis=1, keepdims=True)) + scores = abs(contribution_array).sum(axis=1, keepdims=True) return contribution_array / scores - def _build_dict(self, contributions: np.ndarray, FU_M_index: dict, - rev_dict: dict, limit: int, limit_type: str) -> dict: + def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): """Sort the given contribution array on method or reference flow column. Parameters ---------- - contributions: A 2-dimensional contribution array - FU_M_index : Dictionary which maps the reference flows or methods to their matching columns - rev_dict : 'reverse' dictionary used to map correct activity/method to its value - limit : Number of top-contributing items to include - limit_type : Either "number" or "percent", ContributionAnalysis.sort_array for complete explanation + C : `numpy.ndarray` + A 2-dimensional contribution array + FU_M_index : dict + Dictionary which maps the reference flows or methods to their + matching columns + rev_dict : dict + 'reverse' dictionary used to map correct activity/method to + its value + limit : int + Number of top-contributing items to include + limit_type : str + Either "number" or "percent", ContributionAnalysis.sort_array + for complete explanation Returns ------- - Top-contributing flows per method or activity + dict + Top-contributing flows per method or activity """ topcontribution_dict = dict() for fu_or_method, col in FU_M_index.items(): - top_contribution = ca.sort_array(contributions[col, :], limit=limit, limit_type=limit_type) + top_contribution = ca.sort_array(C[col, :], limit=limit, limit_type=limit_type) cont_per = OrderedDict() cont_per.update({ - ('Total', ''): contributions[col, :].sum(), - ('Rest', ''): contributions[col, :].sum() - top_contribution[:, 0].sum(), + ('Total', ''): C[col, :].sum(), + ('Rest', ''): C[col, :].sum() - top_contribution[:, 0].sum(), }) for value, index in top_contribution: cont_per.update({rev_dict[index]: value}) @@ -394,8 +404,8 @@ def _build_dict(self, contributions: np.ndarray, FU_M_index: dict, return topcontribution_dict @staticmethod - def get_labels(key_list: pd.MultiIndex, fields: Optional[list] = None, separator: str = ' | ', - max_length: int = False, mask: Optional[list] = None) -> list: + def get_labels(key_list, fields=None, separator=' | ', + max_length=False, mask=None): """Generate labels from metadata information. Setting max_length will wrap the label into a multi-line string if @@ -403,16 +413,23 @@ def get_labels(key_list: pd.MultiIndex, fields: Optional[list] = None, separator Parameters ---------- - key_list : An index containing 'keys' to be retrieved from the MetaDataStore - fields : List of column-names to be included from the MetaDataStore - separator : Specific separator to use when joining strings together - max_length : Allowed character length before string is wrapped over multiple lines - mask : Instead of the metadata, this list is used to check keys against. + key_list : `pandas.MultiIndex` + An index containing 'keys' to be retrieved from the MetaDataStore + fields : list + List of column-names to be included from the MetaDataStore + separator : str + Specific separator to use when joining strings together + max_length : int + Allowed character length before string is wrapped over multiple + lines + mask : list + Instead of the metadata, this list is used to check keys against. 
Use if data is aggregated or keys do not exist in MetaDataStore Returns ------- - Translated and/or joined (and wrapped) labels matching the keys + list + Translated and/or joined (and wrapped) labels matching the keys """ fields = fields if fields else ['name', 'reference product', 'location', 'database'] @@ -432,8 +449,8 @@ def get_labels(key_list: pd.MultiIndex, fields: Optional[list] = None, separator return translated_keys @classmethod - def join_df_with_metadata(cls, df: pd.DataFrame, x_fields: Optional[list] = None, y_fields: Optional[list] = None, - special_keys: Optional[list] = None) -> pd.DataFrame: + def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, + special_keys=None): """Join a dataframe that has keys on the index with metadata. Metadata fields are defined in x_fields. @@ -442,19 +459,24 @@ def join_df_with_metadata(cls, df: pd.DataFrame, x_fields: Optional[list] = None Parameters ---------- - df : Simple DataFrame containing processed data - x_fields : List of additional columns to add from the MetaDataStore - y_fields : List of column keys for the data in the df dataframe - special_keys : List of specific items to place at the top of the dataframe + df : `pandas.DataFrame` + Simple DataFrame containing processed data + x_fields : list + List of additional columns to add from the MetaDataStore + y_fields : list + List of column keys for the data in the df dataframe + special_keys : list + List of specific items to place at the top of the dataframe Returns ------- - Expanded and metadata-annotated dataframe + `pandas.DataFrame` + Expanded and metadata-annotated dataframe """ # replace column keys with labels - df.columns = cls.get_labels(df.columns, fields=y_fields) + df.columns = cls.get_labels(df.columns, fields=y_fields)#, separator='\n') # Coerce index to MultiIndex if it currently isn't if not isinstance(df.index, pd.MultiIndex): df.index = pd.MultiIndex.from_tuples(df.index) @@ -476,20 +498,27 @@ def join_df_with_metadata(cls, df: pd.DataFrame, x_fields: Optional[list] = None joined.index = cls.get_labels(joined.index, fields=x_fields) return joined - def get_labelled_contribution_dict(self, cont_dict: dict, x_fields: list = None, - y_fields: list = None, mask: list = None) -> pd.DataFrame: + def get_labelled_contribution_dict(self, cont_dict, x_fields=None, + y_fields=None, mask=None): """Annotate the contribution dict with metadata. 
Parameters ---------- - cont_dict : Holds the contribution data connected to the functions of methods - x_fields : X-axis fieldnames, these are usually the indexes/keys of specific processes - y_fields : Column names specific to the cont_dict to be labelled - mask : Used in case of aggregation or special cases where the usual way of using the metadata cannot be used + cont_dict : dict + Holds the contribution data connected to the functions of methods + x_fields : list + X-axis fieldnames, these are usually the indexes/keys of specific + processes + y_fields : list + Column names specific to the cont_dict to be labelled + mask : list + Used in case of aggregation or special cases where the usual + way of using the metadata cannot be used Returns ------- - Annotated contribution dict inside a pandas dataframe + `pandas.DataFrame` + Annotated contribution dict inside a pandas dataframe """ dfs = ( @@ -507,6 +536,7 @@ def get_labelled_contribution_dict(self, cont_dict: dict, x_fields: list = None, index = df.loc[df.index.difference(special_keys)].replace(0, np.nan).dropna(how='all').index.union(special_keys) df = df.loc[index] + joined = None if not mask: joined = self.join_df_with_metadata( df, x_fields=x_fields, y_fields=y_fields, @@ -523,10 +553,13 @@ def get_labelled_contribution_dict(self, cont_dict: dict, x_fields: list = None, joined = df if joined is not None: return joined.reset_index(drop=False) + return @staticmethod def adjust_table_unit(df: pd.DataFrame, method: Optional[tuple]) -> pd.DataFrame: - """Given a dataframe, adjust the unit of the table to either match the given method, or not exist.""" + """Given a dataframe, adjust the unit of the table to either match the + given method, or not exist. + """ if "unit" not in df.columns: return df keys = df.index[~df["index"].isin({"Total", "Rest"})] @@ -545,8 +578,9 @@ def _build_inventory(inventory: dict, indices: dict, columns: list, joined.reset_index(inplace=True, drop=True) return joined - def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}) -> pd.DataFrame: - """Return an inventory dataframe with metadata of the given type.""" + def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}): + """Returns an inventory dataframe with metadata of the given type. + """ try: data = self.inventory_data[inventory_type] appending = columns.difference(set(data[3])) @@ -579,8 +613,9 @@ def _build_lca_scores_df(self, scores: np.ndarray) -> pd.DataFrame: joined = joined.loc[:, col_order.append(methods)] return joined.reset_index(drop=False) - def lca_scores_df(self, normalized: bool = False) -> pd.DataFrame: - """Return a metadata-annotated DataFrame of the LCA scores.""" + def lca_scores_df(self, normalized=False) -> pd.DataFrame: + """Returns a metadata-annotated DataFrame of the LCA scores. + """ scores = self.mlca.lca_scores if not normalized else self.mlca.lca_scores_normalized return self._build_lca_scores_df(scores) @@ -590,7 +625,8 @@ def _build_contributions(data: np.ndarray, index: int, axis: int) -> np.ndarray: def get_contributions(self, contribution, functional_unit=None, method=None) -> np.ndarray: - """Return a contribution matrix given the type and fu / method.""" + """Return a contribution matrix given the type and fu / method + """ if all([functional_unit, method]) or not any([functional_unit, method]): raise ValueError( "It must be either by reference flow or by impact category. 
Provided:" @@ -609,32 +645,40 @@ def get_contributions(self, contribution, functional_unit=None, dataset[contribution], self.mlca.func_key_dict[functional_unit], 0 ) - def aggregate_by_parameters(self, contributions: np.ndarray, inventory: str, + def aggregate_by_parameters(self, C: np.ndarray, inventory: str, parameters: Union[str, list] = None): - """Perform aggregation of the contribution data given parameters. + """Perform aggregation of the contribution data given parameters Parameters ---------- - contributions : 2-dimensional contribution array - inventory : Either 'biosphere' or 'technosphere', used to determine which inventory to use - parameters : One or more parameters by which to aggregate the given contribution array. + C : `numpy.ndarray` + 2-dimensional contribution array + inventory: str + Either 'biosphere' or 'technosphere', used to determine which + inventory to use + parameters : str or list + One or more parameters by which to aggregate the given contribution + array. Returns ------- - aggregated : pd.DataFrame + `numpy.ndarray` The aggregated 2-dimensional contribution array mask_index : dict - Contains all of the values of the aggregation mask, linked to their indexes + Contains all of the values of the aggregation mask, linked to + their indexes mask : list or dictview or None An optional list or dictview of the mask_index values + ------- + """ rev_index, keys, fields = self.aggregate_data[inventory] if not parameters: - return contributions, rev_index, None + return C, rev_index, None - df = pd.DataFrame(contributions).T - columns = list(range(contributions.shape[0])) + df = pd.DataFrame(C).T + columns = list(range(C.shape[0])) df.index = pd.MultiIndex.from_tuples(rev_index.values()) metadata = AB_metadata.get_metadata(list(keys), fields) @@ -651,7 +695,7 @@ def _contribution_rows(self, contribution: str, aggregator=None): return self.act_fields if contribution == self.ACT else self.ef_fields return aggregator if isinstance(aggregator, list) else [aggregator] - def _correct_method_index(self, mthd_indx: list) -> dict: + def _correct_method_index(self, mthd_indx): """ A method for amending the tuples for impact method labels so that all tuples are fully printed. @@ -676,10 +720,9 @@ def _contribution_index_cols(self, **kwargs) -> (dict, Optional[Iterable]): return self.mlca.fu_index, self.act_fields return self._correct_method_index(self.mlca.methods), None - def top_elementary_flow_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, - aggregator: Optional[str, list] = None, limit: int = 5, - normalize: bool = False, limit_type: str = "number", **kwargs - ) -> pd.DataFrame: + def top_elementary_flow_contributions(self, functional_unit=None, method=None, + aggregator=None, limit=5, normalize=False, + limit_type="number", **kwargs): """Return top EF contributions for either functional_unit or method. 
* If functional_unit: Compare the unit against all considered impact @@ -688,42 +731,49 @@ def top_elementary_flow_contributions(self, functional_unit: Optional[tuple] = N Parameters ---------- - functional_unit : The reference flow to compare all considered impact categories against - method : The method to compare all considered reference flows against - aggregator : Used to aggregate EF contributions over certain columns - limit : The number of top contributions to consider - normalize : Determines whether or not to normalize the contribution values - limit_type : The type of limit, either 'number' or 'percent' + functional_unit : tuple, optional + The reference flow to compare all considered impact categories against + method : tuple, optional + The method to compare all considered reference flows against + aggregator : str or list, optional + Used to aggregate EF contributions over certain columns + limit : int + The number of top contributions to consider + normalize : bool + Determines whether or not to normalize the contribution values + limit_type : str + The type of limit, either 'number' or 'percent' + Returns ------- - Annotated top-contribution dataframe + `pandas.DataFrame` + Annotated top-contribution dataframe """ - contributions = self.get_contributions(self.EF, functional_unit, method) + C = self.get_contributions(self.EF, functional_unit, method) x_fields = self._contribution_rows(self.EF, aggregator) index, y_fields = self._contribution_index_cols( functional_unit=functional_unit, method=method ) - contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.BIOS, aggregator) + C, rev_index, mask = self.aggregate_by_parameters(C, self.BIOS, aggregator) # Normalise if required if normalize: - contributions = self.normalize(contributions) + C = self.normalize(C) - top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type) + top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type) labelled_df = self.get_labelled_contribution_dict( top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask ) self.adjust_table_unit(labelled_df, method) return labelled_df - def top_process_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, - aggregator: Optional[str, list] = None, limit: int = 5, - normalize: bool = False, limit_type: str = "number", **kwargs - ) -> pd.DataFrame: - """Return top process contributions for functional_unit or method. + def top_process_contributions(self, functional_unit=None, method=None, + aggregator=None, limit=5, normalize=False, + limit_type="number", **kwargs): + """Return top process contributions for functional_unit or method * If functional_unit: Compare the process against all considered impact assessment methods. 
@@ -731,31 +781,38 @@ def top_process_contributions(self, functional_unit: Optional[tuple] = None, met Parameters ---------- - functional_unit : The reference flow to compare all considered impact categories against - method : The method to compare all considered reference flows against - aggregator : Used to aggregate EF contributions over certain columns - limit : The number of top contributions to consider - normalize : Determines whether or not to normalize the contribution values - limit_type : The type of limit, either 'number' or 'percent' + functional_unit : tuple, optional + The reference flow to compare all considered methods against + method : tuple, optional + The method to compare all considered reference flows against + aggregator : str or list, optional + Used to aggregate PC contributions over certain columns + limit : int + The number of top contributions to consider + normalize : bool + Determines whether or not to normalize the contribution values + limit_type : str + The type of limit, either 'number' or 'percent' Returns ------- - Annotated top-contribution dataframe + `pandas.DataFrame` + Annotated top-contribution dataframe """ - contributions = self.get_contributions(self.ACT, functional_unit, method) + C = self.get_contributions(self.ACT, functional_unit, method) x_fields = self._contribution_rows(self.ACT, aggregator) index, y_fields = self._contribution_index_cols( functional_unit=functional_unit, method=method ) - contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.TECH, aggregator) + C, rev_index, mask = self.aggregate_by_parameters(C, self.TECH, aggregator) # Normalise if required if normalize: - contributions = self.normalize(contributions) + C = self.normalize(C) - top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type) + top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type) labelled_df = self.get_labelled_contribution_dict( top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask ) From 752bd1342548a44cab294d2f3fd6fd0bfe62e00d Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Thu, 28 Sep 2023 21:32:23 +0200 Subject: [PATCH 19/72] Fix type hinting error --- activity_browser/bwutils/multilca.py | 229 ++++++++++----------------- 1 file changed, 86 insertions(+), 143 deletions(-) diff --git a/activity_browser/bwutils/multilca.py b/activity_browser/bwutils/multilca.py index ff51343e4..7c0e0b0f8 100644 --- a/activity_browser/bwutils/multilca.py +++ b/activity_browser/bwutils/multilca.py @@ -348,55 +348,45 @@ def __init__(self, mlca): "technosphere": (self.mlca.rev_activity_dict, self.mlca.lca.activity_dict, self.act_fields), } - def normalize(self, contribution_array): + def normalize(self, contribution_array: np.ndarray) -> np.ndarray: """Normalise the contribution array. Parameters ---------- - contribution_array : `numpy.ndarray` - A 2-dimensional contribution array + contribution_array : A 2-dimensional contribution array Returns ------- - `numpy.ndarray` - 2-dimensional array of same shape, with scores normalized. + 2-dimensional array of same shape, with scores normalized. 
""" - scores = abs(contribution_array).sum(axis=1, keepdims=True) + scores = abs(contribution_array.sum(axis=1, keepdims=True)) return contribution_array / scores - def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): + def _build_dict(self, contributions: np.ndarray, FU_M_index: dict, + rev_dict: dict, limit: int, limit_type: str) -> dict: """Sort the given contribution array on method or reference flow column. Parameters ---------- - C : `numpy.ndarray` - A 2-dimensional contribution array - FU_M_index : dict - Dictionary which maps the reference flows or methods to their - matching columns - rev_dict : dict - 'reverse' dictionary used to map correct activity/method to - its value - limit : int - Number of top-contributing items to include - limit_type : str - Either "number" or "percent", ContributionAnalysis.sort_array - for complete explanation + contributions: A 2-dimensional contribution array + FU_M_index : Dictionary which maps the reference flows or methods to their matching columns + rev_dict : 'reverse' dictionary used to map correct activity/method to its value + limit : Number of top-contributing items to include + limit_type : Either "number" or "percent", ContributionAnalysis.sort_array for complete explanation Returns ------- - dict - Top-contributing flows per method or activity + Top-contributing flows per method or activity """ topcontribution_dict = dict() for fu_or_method, col in FU_M_index.items(): - top_contribution = ca.sort_array(C[col, :], limit=limit, limit_type=limit_type) + top_contribution = ca.sort_array(contributions[col, :], limit=limit, limit_type=limit_type) cont_per = OrderedDict() cont_per.update({ - ('Total', ''): C[col, :].sum(), - ('Rest', ''): C[col, :].sum() - top_contribution[:, 0].sum(), + ('Total', ''): contributions[col, :].sum(), + ('Rest', ''): contributions[col, :].sum() - top_contribution[:, 0].sum(), }) for value, index in top_contribution: cont_per.update({rev_dict[index]: value}) @@ -404,8 +394,8 @@ def _build_dict(self, C, FU_M_index, rev_dict, limit, limit_type): return topcontribution_dict @staticmethod - def get_labels(key_list, fields=None, separator=' | ', - max_length=False, mask=None): + def get_labels(key_list: pd.MultiIndex, fields: Optional[list] = None, separator: str = ' | ', + max_length: int = False, mask: Optional[list] = None) -> list: """Generate labels from metadata information. Setting max_length will wrap the label into a multi-line string if @@ -413,23 +403,16 @@ def get_labels(key_list, fields=None, separator=' | ', Parameters ---------- - key_list : `pandas.MultiIndex` - An index containing 'keys' to be retrieved from the MetaDataStore - fields : list - List of column-names to be included from the MetaDataStore - separator : str - Specific separator to use when joining strings together - max_length : int - Allowed character length before string is wrapped over multiple - lines - mask : list - Instead of the metadata, this list is used to check keys against. + key_list : An index containing 'keys' to be retrieved from the MetaDataStore + fields : List of column-names to be included from the MetaDataStore + separator : Specific separator to use when joining strings together + max_length : Allowed character length before string is wrapped over multiple lines + mask : Instead of the metadata, this list is used to check keys against. 
Use if data is aggregated or keys do not exist in MetaDataStore Returns ------- - list - Translated and/or joined (and wrapped) labels matching the keys + Translated and/or joined (and wrapped) labels matching the keys """ fields = fields if fields else ['name', 'reference product', 'location', 'database'] @@ -449,8 +432,8 @@ def get_labels(key_list, fields=None, separator=' | ', return translated_keys @classmethod - def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, - special_keys=None): + def join_df_with_metadata(cls, df: pd.DataFrame, x_fields: Optional[list] = None, y_fields: Optional[list] = None, + special_keys: Optional[list] = None) -> pd.DataFrame: """Join a dataframe that has keys on the index with metadata. Metadata fields are defined in x_fields. @@ -459,24 +442,19 @@ def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, Parameters ---------- - df : `pandas.DataFrame` - Simple DataFrame containing processed data - x_fields : list - List of additional columns to add from the MetaDataStore - y_fields : list - List of column keys for the data in the df dataframe - special_keys : list - List of specific items to place at the top of the dataframe + df : Simple DataFrame containing processed data + x_fields : List of additional columns to add from the MetaDataStore + y_fields : List of column keys for the data in the df dataframe + special_keys : List of specific items to place at the top of the dataframe Returns ------- - `pandas.DataFrame` - Expanded and metadata-annotated dataframe + Expanded and metadata-annotated dataframe """ # replace column keys with labels - df.columns = cls.get_labels(df.columns, fields=y_fields)#, separator='\n') + df.columns = cls.get_labels(df.columns, fields=y_fields) # Coerce index to MultiIndex if it currently isn't if not isinstance(df.index, pd.MultiIndex): df.index = pd.MultiIndex.from_tuples(df.index) @@ -498,27 +476,20 @@ def join_df_with_metadata(cls, df, x_fields=None, y_fields=None, joined.index = cls.get_labels(joined.index, fields=x_fields) return joined - def get_labelled_contribution_dict(self, cont_dict, x_fields=None, - y_fields=None, mask=None): + def get_labelled_contribution_dict(self, cont_dict: dict, x_fields: list = None, + y_fields: list = None, mask: list = None) -> pd.DataFrame: """Annotate the contribution dict with metadata. 
Parameters ---------- - cont_dict : dict - Holds the contribution data connected to the functions of methods - x_fields : list - X-axis fieldnames, these are usually the indexes/keys of specific - processes - y_fields : list - Column names specific to the cont_dict to be labelled - mask : list - Used in case of aggregation or special cases where the usual - way of using the metadata cannot be used + cont_dict : Holds the contribution data connected to the functions of methods + x_fields : X-axis fieldnames, these are usually the indexes/keys of specific processes + y_fields : Column names specific to the cont_dict to be labelled + mask : Used in case of aggregation or special cases where the usual way of using the metadata cannot be used Returns ------- - `pandas.DataFrame` - Annotated contribution dict inside a pandas dataframe + Annotated contribution dict inside a pandas dataframe """ dfs = ( @@ -536,7 +507,6 @@ def get_labelled_contribution_dict(self, cont_dict, x_fields=None, index = df.loc[df.index.difference(special_keys)].replace(0, np.nan).dropna(how='all').index.union(special_keys) df = df.loc[index] - joined = None if not mask: joined = self.join_df_with_metadata( df, x_fields=x_fields, y_fields=y_fields, @@ -553,13 +523,10 @@ def get_labelled_contribution_dict(self, cont_dict, x_fields=None, joined = df if joined is not None: return joined.reset_index(drop=False) - return @staticmethod def adjust_table_unit(df: pd.DataFrame, method: Optional[tuple]) -> pd.DataFrame: - """Given a dataframe, adjust the unit of the table to either match the - given method, or not exist. - """ + """Given a dataframe, adjust the unit of the table to either match the given method, or not exist.""" if "unit" not in df.columns: return df keys = df.index[~df["index"].isin({"Total", "Rest"})] @@ -578,9 +545,8 @@ def _build_inventory(inventory: dict, indices: dict, columns: list, joined.reset_index(inplace=True, drop=True) return joined - def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}): - """Returns an inventory dataframe with metadata of the given type. - """ + def inventory_df(self, inventory_type: str, columns: set = {'name', 'database', 'code'}) -> pd.DataFrame: + """Return an inventory dataframe with metadata of the given type.""" try: data = self.inventory_data[inventory_type] appending = columns.difference(set(data[3])) @@ -613,9 +579,8 @@ def _build_lca_scores_df(self, scores: np.ndarray) -> pd.DataFrame: joined = joined.loc[:, col_order.append(methods)] return joined.reset_index(drop=False) - def lca_scores_df(self, normalized=False) -> pd.DataFrame: - """Returns a metadata-annotated DataFrame of the LCA scores. - """ + def lca_scores_df(self, normalized: bool = False) -> pd.DataFrame: + """Return a metadata-annotated DataFrame of the LCA scores.""" scores = self.mlca.lca_scores if not normalized else self.mlca.lca_scores_normalized return self._build_lca_scores_df(scores) @@ -625,8 +590,7 @@ def _build_contributions(data: np.ndarray, index: int, axis: int) -> np.ndarray: def get_contributions(self, contribution, functional_unit=None, method=None) -> np.ndarray: - """Return a contribution matrix given the type and fu / method - """ + """Return a contribution matrix given the type and fu / method.""" if all([functional_unit, method]) or not any([functional_unit, method]): raise ValueError( "It must be either by reference flow or by impact category. 
Provided:" @@ -645,40 +609,32 @@ def get_contributions(self, contribution, functional_unit=None, dataset[contribution], self.mlca.func_key_dict[functional_unit], 0 ) - def aggregate_by_parameters(self, C: np.ndarray, inventory: str, + def aggregate_by_parameters(self, contributions: np.ndarray, inventory: str, parameters: Union[str, list] = None): - """Perform aggregation of the contribution data given parameters + """Perform aggregation of the contribution data given parameters. Parameters ---------- - C : `numpy.ndarray` - 2-dimensional contribution array - inventory: str - Either 'biosphere' or 'technosphere', used to determine which - inventory to use - parameters : str or list - One or more parameters by which to aggregate the given contribution - array. + contributions : 2-dimensional contribution array + inventory : Either 'biosphere' or 'technosphere', used to determine which inventory to use + parameters : One or more parameters by which to aggregate the given contribution array. Returns ------- - `numpy.ndarray` + aggregated : pd.DataFrame The aggregated 2-dimensional contribution array mask_index : dict - Contains all of the values of the aggregation mask, linked to - their indexes + Contains all of the values of the aggregation mask, linked to their indexes mask : list or dictview or None An optional list or dictview of the mask_index values - ------- - """ rev_index, keys, fields = self.aggregate_data[inventory] if not parameters: - return C, rev_index, None + return contributions, rev_index, None - df = pd.DataFrame(C).T - columns = list(range(C.shape[0])) + df = pd.DataFrame(contributions).T + columns = list(range(contributions.shape[0])) df.index = pd.MultiIndex.from_tuples(rev_index.values()) metadata = AB_metadata.get_metadata(list(keys), fields) @@ -695,7 +651,7 @@ def _contribution_rows(self, contribution: str, aggregator=None): return self.act_fields if contribution == self.ACT else self.ef_fields return aggregator if isinstance(aggregator, list) else [aggregator] - def _correct_method_index(self, mthd_indx): + def _correct_method_index(self, mthd_indx: list) -> dict: """ A method for amending the tuples for impact method labels so that all tuples are fully printed. @@ -720,9 +676,10 @@ def _contribution_index_cols(self, **kwargs) -> (dict, Optional[Iterable]): return self.mlca.fu_index, self.act_fields return self._correct_method_index(self.mlca.methods), None - def top_elementary_flow_contributions(self, functional_unit=None, method=None, - aggregator=None, limit=5, normalize=False, - limit_type="number", **kwargs): + def top_elementary_flow_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, + aggregator: Union[str, list, None] = None, limit: int = 5, + normalize: bool = False, limit_type: str = "number", **kwargs + ) -> pd.DataFrame: """Return top EF contributions for either functional_unit or method. 
* If functional_unit: Compare the unit against all considered impact @@ -731,49 +688,42 @@ def top_elementary_flow_contributions(self, functional_unit=None, method=None, Parameters ---------- - functional_unit : tuple, optional - The reference flow to compare all considered impact categories against - method : tuple, optional - The method to compare all considered reference flows against - aggregator : str or list, optional - Used to aggregate EF contributions over certain columns - limit : int - The number of top contributions to consider - normalize : bool - Determines whether or not to normalize the contribution values - limit_type : str - The type of limit, either 'number' or 'percent' - + functional_unit : The reference flow to compare all considered impact categories against + method : The method to compare all considered reference flows against + aggregator : Used to aggregate EF contributions over certain columns + limit : The number of top contributions to consider + normalize : Determines whether or not to normalize the contribution values + limit_type : The type of limit, either 'number' or 'percent' Returns ------- - `pandas.DataFrame` - Annotated top-contribution dataframe + Annotated top-contribution dataframe """ - C = self.get_contributions(self.EF, functional_unit, method) + contributions = self.get_contributions(self.EF, functional_unit, method) x_fields = self._contribution_rows(self.EF, aggregator) index, y_fields = self._contribution_index_cols( functional_unit=functional_unit, method=method ) - C, rev_index, mask = self.aggregate_by_parameters(C, self.BIOS, aggregator) + contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.BIOS, aggregator) # Normalise if required if normalize: - C = self.normalize(C) + contributions = self.normalize(contributions) - top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type) + top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type) labelled_df = self.get_labelled_contribution_dict( top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask ) self.adjust_table_unit(labelled_df, method) return labelled_df - def top_process_contributions(self, functional_unit=None, method=None, - aggregator=None, limit=5, normalize=False, - limit_type="number", **kwargs): - """Return top process contributions for functional_unit or method + def top_process_contributions(self, functional_unit: Optional[tuple] = None, method: Optional[tuple] = None, + aggregator: Union[str, list, None] = None, limit: int = 5, + normalize: bool = False, limit_type: str = "number", **kwargs + ) -> pd.DataFrame: + """Return top process contributions for functional_unit or method. * If functional_unit: Compare the process against all considered impact assessment methods. 
@@ -781,38 +731,31 @@ def top_process_contributions(self, functional_unit=None,
 
         Parameters
         ----------
-        functional_unit : tuple, optional
-            The reference flow to compare all considered methods against
-        method : tuple, optional
-            The method to compare all considered reference flows against
-        aggregator : str or list, optional
-            Used to aggregate PC contributions over certain columns
-        limit : int
-            The number of top contributions to consider
-        normalize : bool
-            Determines whether or not to normalize the contribution values
-        limit_type : str
-            The type of limit, either 'number' or 'percent'
+        functional_unit : The reference flow to compare all considered impact categories against
+        method : The method to compare all considered reference flows against
+        aggregator : Used to aggregate PC contributions over certain columns
+        limit : The number of top contributions to consider
+        normalize : Determines whether or not to normalize the contribution values
+        limit_type : The type of limit, either 'number' or 'percent'
 
         Returns
         -------
-        `pandas.DataFrame`
-            Annotated top-contribution dataframe
+        Annotated top-contribution dataframe
         """
-        C = self.get_contributions(self.ACT, functional_unit, method)
+        contributions = self.get_contributions(self.ACT, functional_unit, method)
         x_fields = self._contribution_rows(self.ACT, aggregator)
         index, y_fields = self._contribution_index_cols(
             functional_unit=functional_unit, method=method
         )
-        C, rev_index, mask = self.aggregate_by_parameters(C, self.TECH, aggregator)
+        contributions, rev_index, mask = self.aggregate_by_parameters(contributions, self.TECH, aggregator)
 
         # Normalise if required
         if normalize:
-            C = self.normalize(C)
+            contributions = self.normalize(contributions)
 
-        top_cont_dict = self._build_dict(C, index, rev_index, limit, limit_type)
+        top_cont_dict = self._build_dict(contributions, index, rev_index, limit, limit_type)
         labelled_df = self.get_labelled_contribution_dict(
             top_cont_dict, x_fields=x_fields, y_fields=y_fields, mask=mask
         )

From 97ad641f621311898e8722031429c440df8cd39d Mon Sep 17 00:00:00 2001
From: Marc van der Meide
Date: Wed, 11 Oct 2023 10:47:30 +0200
Subject: [PATCH 20/72] Update README.md

---
 README.md | 62 +++++++------------------------------------------------
 1 file changed, 8 insertions(+), 54 deletions(-)

diff --git a/README.md b/README.md
index 1b30a0505..88e57f66e 100644
--- a/README.md
+++ b/README.md
@@ -27,13 +27,7 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa
 # Contents
 
 - [Installation](#installation)
-  - [The quick way](#the-quick-way)
-  - [The thorough way](#the-thorough-way)
-    - [Conda](#conda)
-    - [Install the AB with ecoinvent >=3.9](#install-the-ab-with-ecoinvent-39)
-    - [Install the AB with ecoinvent <3.9](#install-the-ab-with-older-ecoinvent-versions-39)
-    - [Updating the AB](#updating-the-ab)
-  - [Mamba](#mamba)
+- [Updating the AB](#updating-the-ab)
 - [Getting started](#getting-started)
   - [Running the AB](#running-the-ab)
   - [Importing LCI databases](#importing-lci-databases)
@@ -50,8 +44,6 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa
 
 # Installation
 
-## The quick way
-
 You can install and start the activity-browser like this:
 
 ```bash
@@ -60,41 +52,14 @@ conda activate ab
 activity-browser
 ```
 
-## The thorough way
-
-| :warning: The activity browser has dropped support for python versions below `3.8`|
-|---|
-| You should re-install if you have an older installation of the activity browser which doesn't use `python >= 3.8` (you 
can check with `conda list` or `python --version` in your conda environment). You can remove your existing environment with `conda remove -n ab --all` or choose a new environment name (instead of `ab`). Re-installing will not affect your activity-browser/brightway projects. | - -### Conda - -We recommend that you use **conda** to manage your python installation. You can install [Anaconda](https://www.anaconda.com/products/individual) or the more compact [miniconda](https://conda.io/miniconda.html) (Python 3 of course) for your operating system. Installation instructions for miniconda can be found [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html). See also the [conda user guide](https://docs.conda.io/projects/conda/en/latest/user-guide/index.html) or the [Conda cheat sheet](https://docs.conda.io/projects/conda/en/latest/_downloads/843d9e0198f2a193a3484886fa28163c/conda-cheatsheet.pdf). - -Skip this step if you already have a working installation of anaconda or miniconda, but make sure to keep your conda installation up-to-date: `conda update conda`. - -### Add the Conda-Forge channel -The activity-browser has many dependencies that are managed by the [conda-forge](https://conda.io/docs/user-guide/tasks/manage-channels.html) channel. Open a cmd-window or terminal (in Windows you may have to use the Anaconda prompt) and type the following: - -```bash -conda config --prepend channels conda-forge -``` -### Install the AB with ecoinvent >=3.9 -After prepending the Conda-Forge channel the following line should be executed within the command prompt/terminal to install the AB and it's dependencies. - -```bash -conda create -n ab activity-browser -``` -This will install the Activity Browser with the latest version of the Brightway2 libraries (currently excluding Brightway2.5 libraries). - -### Install the AB with older ecoinvent versions (<3.9) - -If you want to work with with older versions of ecoinvent (<3.9) in the AB, a different Biosphere3 database needs to be installed. This requires a _**different version of the bw2io library**_ to be installed, see also [here](https://github.com/brightway-lca/brightway2-io). Note that this version of bw2io can ONLY work with ecoinvent versions < 3.9. If you want to work with version > 3.9 AND < 3.9, the only solution currently available is to use two separate virtual environments (i.e. two AB installations). +### Mamba -To install a version of the AB that can handle ecoinvent versions <3.9, do the following: For a new installation from the conda-forge repository the same initial steps need to be made: Prepending the Conda-Forge repository in the channels, and installing the AB and dependencies. After the successful installation, the following two commands need to be executed before running the AB: 1) Remove the latest version of the Brightway2 Input-Output library, 2) Install an older version of the Brightway2 Input-Output library. 
+You can also install the AB using [Mamba](https://mamba.readthedocs.io/en/latest/mamba-installation.html#mamba-install): ```bash -conda remove --force bw2io -conda install bw2io=0.8.7 +mamba create -n ab activity-browser +mamba activate ab +activity-browser ``` #### Activity Browser is installed @@ -110,16 +75,6 @@ conda activate ab conda update activity-browser ``` -## Mamba - -You can also install the AB using [Mamba](https://mamba.readthedocs.io/en/latest/mamba-installation.html#mamba-install): - -```bash -mamba create -n ab activity-browser -mamba activate ab -activity-browser -``` - # Getting started ## Running the AB @@ -173,7 +128,6 @@ These are the plugins that we know about. To add your plugin to this list either | [Notebook](https://github.com/Pan6ora/ab-plugin-Notebook) | Use Jupyter notebooks from AB | [anaconda](https://anaconda.org/pan6ora/ab-plugin-template), [github](https://github.com/Pan6ora/ab-plugin-Notebook) | Rémy Le Calloch | | [template](https://github.com/Pan6ora/activity-browser-plugin-template) | An empty plugin to start from | [anaconda](https://anaconda.org/pan6ora/ab-plugin-template), [github](https://github.com/Pan6ora/activity-browser-plugin-template) | Rémy Le Calloch | - ## Installation ### detailed instructions @@ -226,14 +180,14 @@ If you experience problems or are suffering from a specific bug, please [raise a ### Current main developers - Bernhard Steubing (b.steubing@cml.leidenuniv.nl) (creator) -- Jonathan Kidner (j.h.kidner@cml.leidenuniv.nl) (lead developer) +- Marc van der Meide ([github]((https://github.com/marc-vdm))) (maintainer) ### Important contributers - [Adrian Haas](https://github.com/haasad) - [Chris Mutel](https://github.com/cmutel) - [Daniel de Koning](https://github.com/dgdekoning) -- [Marc van der Meide](https://github.com/marc-vdm) +- [Jonathan Kidner](https://github.com/Zoophobus) - [Remy le Calloch](https://remy.lecalloch.net) # Copyright From 8854cb655d34e8fcd6d3eb2abd62b14f820056ea Mon Sep 17 00:00:00 2001 From: Marc van der Meide Date: Wed, 11 Oct 2023 11:03:27 +0200 Subject: [PATCH 21/72] Update README.md --- README.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/README.md b/README.md index 88e57f66e..a0bb99fe8 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,8 @@ Please also read and cite our [scientific paper](https://doi.org/10.1016/j.simpa # Installation +## The quick way + You can install and start the activity-browser like this: ```bash @@ -62,6 +64,28 @@ mamba activate ab activity-browser ``` +## The thorough way +### Conda + +We recommend that you use **conda** to manage your python installation. You can install [Anaconda](https://www.anaconda.com/products/individual) or the more compact [miniconda](https://conda.io/miniconda.html) (Python 3 version) for your operating system. Installation instructions for miniconda can be found [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html). See also the [conda user guide](https://docs.conda.io/projects/conda/en/latest/user-guide/index.html) or the [Conda cheat sheet](https://docs.conda.io/projects/conda/en/latest/_downloads/843d9e0198f2a193a3484886fa28163c/conda-cheatsheet.pdf). + +Skip this step if you already have a working installation of anaconda or miniconda, but make sure to keep your conda installation up-to-date: `conda update conda`. 
+ +### Add the Conda-Forge channel +The activity-browser has many dependencies that are managed by the [conda-forge](https://conda.io/docs/user-guide/tasks/manage-channels.html) channel. Open a cmd-window or terminal (in Windows you may have to use the Anaconda prompt) and type the following: + +```bash +conda config --prepend channels conda-forge +``` + +### Installing Activity Browser + +```bash +conda create -n ab -c conda-forge activity-browser +conda activate ab +activity-browser +``` + #### Activity Browser is installed At this point the activity-browser and all of its dependencies will be installed in a new conda environment called `ab`. You can change the environment name `ab` to whatever suits you. From 1e5fe270d1841b96f889b5410b3b59f1d090f4c2 Mon Sep 17 00:00:00 2001 From: Marc van der Meide Date: Wed, 11 Oct 2023 11:38:55 +0200 Subject: [PATCH 22/72] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a0bb99fe8..b80958139 100644 --- a/README.md +++ b/README.md @@ -204,7 +204,7 @@ If you experience problems or are suffering from a specific bug, please [raise a ### Current main developers - Bernhard Steubing (b.steubing@cml.leidenuniv.nl) (creator) -- Marc van der Meide ([github]((https://github.com/marc-vdm))) (maintainer) +- Marc van der Meide ([github](https://github.com/marc-vdm)) (maintainer) ### Important contributers From 3852c717383ff3dfa3f347e6ce6573ee471c6793 Mon Sep 17 00:00:00 2001 From: Jonathan Kidner <12627199+Zoophobus@users.noreply.github.com> Date: Sat, 14 Oct 2023 16:04:36 +0200 Subject: [PATCH 23/72] Updates to the functions get_relevant_flows and get_relevant_activities (#1069) * Updates to the functions from de Koning (get_relevant_flows and get_relevant_activities), avoiding use of pandas apply and using python map functionality for splitting pandas dataframes. * Update the use of DataFrame.applymap to DataFrame.map in the excel file importer module. 
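To make the column-splitting change described above (and shown in the diff below) concrete, here is a small sketch. This is editorial illustration, not part of the patch; the toy frame and its column names are invented:

```python
import pandas as pd

sub = pd.DataFrame({
    "name": ["steel", "steel", "copper"],
    "product": ["beam", "rod", "wire"],
    "database": ["db1", "db1", "db2"],
})

# Old approach: one set per column, via pandas' row/column apply machinery
old = sub.iloc[:, 0:3].apply(set, axis=0)

# New approach: transpose the underlying numpy array and map `set` over it;
# each row of `values.T` holds the values of one original column
names, products, dbs = list(map(set, sub.iloc[:, 0:3].values.T))

assert list(old) == [names, products, dbs]
```

Both forms produce the same sets; the `map` variant simply sidesteps `DataFrame.apply`. The `applymap` change in `excel.py` is related but distinct: pandas 2.1 renamed the element-wise `DataFrame.applymap` to `DataFrame.map`, so that hunk keeps the same element-wise behaviour under the new name.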
---
 activity_browser/bwutils/superstructure/activities.py | 6 ++++--
 activity_browser/bwutils/superstructure/excel.py | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/activity_browser/bwutils/superstructure/activities.py b/activity_browser/bwutils/superstructure/activities.py
index 97c990012..ead625063 100644
--- a/activity_browser/bwutils/superstructure/activities.py
+++ b/activity_browser/bwutils/superstructure/activities.py
@@ -91,7 +91,8 @@ def get_relevant_activities(df: pd.DataFrame, part: str = "from") -> dict:
     if sub.empty:
         return {}
 
-    names, products, locations, dbs = sub.iloc[:, 0:4].apply(set, axis=0)
+    names, products, locations, dbs = list(map(set, sub.iloc[:, 0:4].values.T))
+#    names, products, locations, dbs = sub.iloc[:, 0:4].apply(set, axis=0)
     query = (ActivityDataset
              .select(ActivityDataset.name, ActivityDataset.product,
                      ActivityDataset.location, ActivityDataset.database,
@@ -113,7 +114,8 @@ def get_relevant_flows(df: pd.DataFrame, part: str = "from") -> dict:
     if sub.empty:
         return {}
 
-    names, categories, dbs = sub.iloc[:, 0:3].apply(set, axis=0)
+    names, categories, dbs = list(map(set, sub.iloc[:, 0:3].values.T))
+#    names, categories, dbs = sub.iloc[:, 0:3].apply(set, axis=0)
     query = (ActivityDataset
             .select(ActivityDataset.name, ActivityDataset.data,
                     ActivityDataset.database, ActivityDataset.code)
diff --git a/activity_browser/bwutils/superstructure/excel.py b/activity_browser/bwutils/superstructure/excel.py
index 9d6435c24..e009c624f 100644
--- a/activity_browser/bwutils/superstructure/excel.py
+++ b/activity_browser/bwutils/superstructure/excel.py
@@ -83,7 +83,7 @@ def import_from_excel(document_path: Union[str, Path], import_sheet: int = 1) ->
 
         # Convert specific columns that may have tuples as strings
         columns = ["from categories", "from key", "to categories", "to key"]
-        data.loc[:, columns] = data[columns].applymap(convert_tuple_str)
+        data.loc[:, columns] = data[columns].map(convert_tuple_str)
     except:
         # skip the error checks here, these now occur in the calling layout.tabs.LCA_setup module
         pass

From b6e9c413fb457d8169e10e6c8a05063e5752395b Mon Sep 17 00:00:00 2001
From: Jonathan Kidner <12627199+Zoophobus@users.noreply.github.com>
Date: Fri, 20 Oct 2023 08:41:49 +0200
Subject: [PATCH 24/72] Multiple sdf update (#1083)

* Updates to the logging system to avoid the print statement. Merging with commits for corrections to the uncertainty distributions that were included within the same branch. Improves thread safety in logging, increasing stability in the multi-threaded processes.

* Changes the creation of log files. Amends the uncertainty wizard test to correct for use of logging over the standard console. Adds the .logs to .gitignore.

* 1) Simplifies the use of the python logging facility with QtThreads (as published on the python docs for logging), improving the stability of logging.
2) Changes the generation of the logging object so that it is more compatible with pytest, supporting the existing test procedures.

* Provides an update to the Model for the calculation_setup Methods table, corrects for deletion of impact assessment methods. Updates the routine to the lca_setup module for calling the logger.

* Updates to the logger module: what was the ABLogger class is renamed to ABHandler. The module now contains the formats and settings for the Stream handlers. Calling of the methods in the other AB modules requires passing a logging.Logger instance and the name of the calling module. These are then incorporated into the wrapping routines in the class.
Additions:
1) An error wrapper is provided and also explicitly provides a trace of the error.
2) A timestamp routine is provided and used for providing the file names for log files.
3) A standard location based on appdirs is provided and used for log file locations.

* Corrections to the setup of the logger in the test_uncertainty_wizard module

* Includes a change to the type used for the scenario columns when using multiple files with the combined (combinatoric) approach for the scenarios. Includes respective changes to the boolean tests applied to such Indexes.

* Minor corrections to local repository branch, to keep changes aligned

* Alterations to keep minor changes aligned with master branch and fork

---------

Co-authored-by: zoo
---
 .../bwutils/superstructure/manager.py | 20 +++++++++----------
 tests/test_uncertainty_wizard.py | 1 -
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/activity_browser/bwutils/superstructure/manager.py b/activity_browser/bwutils/superstructure/manager.py
index e80f49c1e..c93b346a9 100644
--- a/activity_browser/bwutils/superstructure/manager.py
+++ b/activity_browser/bwutils/superstructure/manager.py
@@ -14,18 +14,18 @@
 from .dataframe import scenario_columns
 from .utils import guess_flow_type, SUPERSTRUCTURE, _time_it_, edit_superstructure_for_string
 
-import logging
-from activity_browser.logger import ABHandler
-
-logger = logging.getLogger('ab_logs')
-log = ABHandler.setup_with_logger(logger, __name__)
-
 from .file_dialogs import ABPopup
 from ..errors import (CriticalScenarioExtensionError, ScenarioExchangeNotFoundError, ImportCanceledError,
                       ScenarioExchangeDataNotFoundError, UnalignableScenarioColumnsWarning,
                       ScenarioExchangeDataNonNumericError
                       )
 
+import logging
+from activity_browser.logger import ABHandler
+
+logger = logging.getLogger('ab_logs')
+log = ABHandler.setup_with_logger(logger, __name__)
+
 EXCHANGE_KEYS = pd.Index(["from key", "to key"])
 INDEX_KEYS = pd.Index(["from key", "to key", "flow type"])
 
@@ -113,9 +113,8 @@ def _combine_columns(self) -> pd.MultiIndex:
         -------
         A pandas multi-index with the separate dataframes in self.frames contributing to the index levels
         """
-
         cols = [scenario_columns(df).to_list() for df in self.frames]
-        return pd.MultiIndex.from_tuples(list(itertools.product(*cols)))
+        return pd.Index([str(c) for c in list(itertools.product(*cols))])
 
     def _combine_columns_intersect(self) -> pd.Index:
         iterable = iter(self.frames)
@@ -175,7 +174,7 @@ def combine(one, two):
         """
         for col_two in SUPERSTRUCTURE.symmetric_difference(two.columns):
             for idx in one.columns:
-                if col_two in set(idx):
+                if col_two in idx:
                     one.loc[two.index, idx] = two.loc[:, col_two]
         base_scenario_data = pd.DataFrame([], index=index, columns=SUPERSTRUCTURE)
         scenarios_data = pd.DataFrame([], index=index, columns=cols)
@@ -271,8 +270,7 @@ def merge_flows_to_self(df: pd.DataFrame) -> pd.DataFrame:
         )
         scenario_cols = df.columns.difference(SUPERSTRUCTURE)
         prod_indexes = self_referential_production_flows.loc[self_referential_production_flows.index.isin(df.index)].index
-        self_referential_production_flows.loc[prod_indexes, scenario_cols] \
-            = df.loc[df.index.isin(self_referential_production_flows.index), scenario_cols]
+        self_referential_production_flows.loc[prod_indexes, scenario_cols] = df.loc[df.index.isin(self_referential_production_flows.index), scenario_cols]
         self_referential_production_flows.loc[prod_indexes, 'flow type'] = 'production'
 
         # TODO use metadata for the default production values
 
diff --git a/tests/test_uncertainty_wizard.py 
b/tests/test_uncertainty_wizard.py index 6d5ba710c..2c44972fc 100644 --- a/tests/test_uncertainty_wizard.py +++ b/tests/test_uncertainty_wizard.py @@ -14,7 +14,6 @@ from activity_browser.ui.wizards import UncertaintyWizard from activity_browser.signals import signals - from activity_browser.logger import ABHandler """ From 92aa40fd132fb661c1f6d580e09083f744c82a70 Mon Sep 17 00:00:00 2001 From: haasad Date: Sun, 29 Oct 2023 22:11:10 +0100 Subject: [PATCH 25/72] Use node16 actions for main pipeline --- .github/workflows/main.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 0f5df1c51..4d472d3a0 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -11,7 +11,7 @@ jobs: patch-test-environment: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Patch test environment dependencies # This step adds the run requirements from the stable recipe to the test environment uses: mikefarah/yq@master @@ -21,7 +21,7 @@ jobs: - name: Show patched environment run: cat patched-environment.yml - name: Upload patched environment as artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: patched-environment path: patched-environment.yml @@ -38,9 +38,9 @@ jobs: run: shell: bash -l {0} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Download patched test environment - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: patched-environment - name: Setup python ${{ matrix.python-version }} conda environment @@ -55,7 +55,7 @@ jobs: conda env export conda env export -f env.yaml - name: Upload final environment as artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: env-${{ matrix.os }}-${{ matrix.python-version }} path: env.yaml @@ -107,7 +107,7 @@ jobs: env: PKG_NAME: "activity-browser-dev" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build and deploy 3.8 uses: conda-incubator/setup-miniconda@v2 with: @@ -150,7 +150,7 @@ jobs: env: PKG_NAME: "activity-browser-arm" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build and deploy 3.8 uses: conda-incubator/setup-miniconda@v2 with: From 909408ba121a0188f88832921c98348cbebdfb65 Mon Sep 17 00:00:00 2001 From: haasad Date: Sun, 29 Oct 2023 22:23:04 +0100 Subject: [PATCH 26/72] Use node16 actions in release pipeline --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0e6df112a..69f1affe2 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -11,7 +11,7 @@ jobs: run: shell: bash -l {0} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build changelog from PRs with labels id: build_changelog uses: mikepenz/release-changelog-builder-action@v2 From 0654b821bf3d019d0d68228b4d4c54e0ed11c2c4 Mon Sep 17 00:00:00 2001 From: haasad Date: Sun, 29 Oct 2023 22:25:25 +0100 Subject: [PATCH 27/72] Use latest version of release-changelog-builder --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 69f1affe2..746e9a818 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v3 - 
name: Build changelog from PRs with labels id: build_changelog - uses: mikepenz/release-changelog-builder-action@v2 + uses: mikepenz/release-changelog-builder-action@v4 with: configuration: "ci/changelog-configuration.json" env: From e564225f59767eda7861ff772ab9a9e0aa779182 Mon Sep 17 00:00:00 2001 From: haasad Date: Sun, 29 Oct 2023 22:26:12 +0100 Subject: [PATCH 28/72] Use node16 action for install canary pipeline --- .github/workflows/install-canary.yaml | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/install-canary.yaml diff --git a/.github/workflows/install-canary.yaml b/.github/workflows/install-canary.yaml new file mode 100644 index 000000000..1222e6a13 --- /dev/null +++ b/.github/workflows/install-canary.yaml @@ -0,0 +1,40 @@ +name: canary installation +on: + schedule: + # Run the tests once every 24 hours to catch dependency problems early + - cron: '0 7 * * *' + push: + branches: + - install-canary + +jobs: + canary-installs: + runs-on: ${{ matrix.os }} + timeout-minutes: 12 + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.8', '3.9'] + defaults: + run: + shell: bash -l {0} + steps: + - name: Setup python ${{ matrix.python-version }} conda environment + uses: conda-incubator/setup-miniconda@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install activity-browser + run: | + conda create -y -n ab -c conda-forge activity-browser python=${{ matrix.python-version }} + - name: Environment info + run: | + conda activate ab + conda list + conda env export + conda env export -f env.yaml + - name: Upload final environment as artifact + uses: actions/upload-artifact@v3 + with: + name: env-${{ matrix.os }}-${{ matrix.python-version }} + path: env.yaml From ab19c06389de7e9b84575ba6c041161a245a1278 Mon Sep 17 00:00:00 2001 From: haasad Date: Sun, 29 Oct 2023 22:44:04 +0100 Subject: [PATCH 29/72] Remove the special build for arm architecture This was added before brightway2 supported multi-arch builds on conda-forge. It is not needed anymore, because the normal AB is now also installable on arm arch. --- .github/workflows/main.yaml | 46 ------------------------------------- ci/conda-envs/ab_arm.yml | 7 ------ ci/recipe/arm/meta.yaml | 35 ---------------------------- 3 files changed, 88 deletions(-) delete mode 100644 ci/conda-envs/ab_arm.yml delete mode 100644 ci/recipe/arm/meta.yaml diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 4d472d3a0..71d5f4bb3 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -135,49 +135,3 @@ jobs: anaconda -t ${{ secrets.CONDA_UPLOAD_TOKEN }} upload --force \ /usr/share/miniconda/envs/build/conda-bld/noarch/*.tar.bz2 anaconda -t ${{ secrets.CONDA_UPLOAD_TOKEN }} upload ci/conda-envs/ab_dev.yml - - - deploy-arm: - # Make sure to only run a deploy if all tests pass. - needs: - - tests - # And only on a push event, not a pull_request. 
- if: ${{ github.event_name == 'push' }} - runs-on: ubuntu-latest - defaults: - run: - shell: bash -l {0} - env: - PKG_NAME: "activity-browser-arm" - steps: - - uses: actions/checkout@v3 - - name: Build and deploy 3.8 - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: 3.8 - activate-environment: build - environment-file: ci/conda-envs/build.yml - - name: Export version - run: | - echo "VERSION=$(date +'%Y.%m.%d')" >> $GITHUB_ENV - - name: Patch recipe with run requirements from stable - uses: mikefarah/yq@master - # Adds the run dependencies from the stable recipe to the arm recipe - # drop brightway2, but add brightway2_nosolver and scikit-umfpack - # Also adds the dependecies to the ab_arm environment file - with: - cmd: | - yq e '.requirements.run.[] | select(. != "brightway2*") | [.]' ci/recipe/stable/meta.yaml > arm_requirements.yaml - yq e -i '. += ["brightway2_nosolver", "scikit-umfpack"]' arm_requirements.yaml - yq eval-all -i 'select(fi == 0).requirements.run += select(fi == 1) | select(fi == 0)' ci/recipe/arm/meta.yaml arm_requirements.yaml - yq eval-all -i 'select(fi == 0).dependencies += select(fi == 1) | select(fi == 0)' ci/conda-envs/ab_arm.yml arm_requirements.yaml - - name: Show patched arm recipe - run: cat ci/recipe/arm/meta.yaml - - name: Build development package - run: | - conda build ci/recipe/arm - - name: Upload the activity-browser-arm package and env - run: | - anaconda -t ${{ secrets.CONDA_UPLOAD_TOKEN }} upload --force \ - /usr/share/miniconda/envs/build/conda-bld/noarch/*.tar.bz2 - anaconda -t ${{ secrets.CONDA_UPLOAD_TOKEN }} upload ci/conda-envs/ab_arm.yml diff --git a/ci/conda-envs/ab_arm.yml b/ci/conda-envs/ab_arm.yml deleted file mode 100644 index 33f272058..000000000 --- a/ci/conda-envs/ab_arm.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: ab_arm -channels: - - conda-forge - - bsteubing - - cmutel -dependencies: - - activity-browser-arm diff --git a/ci/recipe/arm/meta.yaml b/ci/recipe/arm/meta.yaml deleted file mode 100644 index 98090a131..000000000 --- a/ci/recipe/arm/meta.yaml +++ /dev/null @@ -1,35 +0,0 @@ -package: - name: activity-browser-arm - version: "{{ os.environ.get('VERSION', 'dev') }}" - -source: - path: ../../.. - -build: - noarch: python - number: 0 - script: "{{ PYTHON }} setup.py install --single-version-externally-managed --record record.txt" - script_env: - - PKG_NAME - - VERSION - entry_points: - - activity-browser = activity_browser:run_activity_browser - - activity-browser-cleanup = activity_browser.bwutils:cleanup - -requirements: - build: - - python - - setuptools - run: # dependencies are added via github action from ci/recipe/stable/meta.yaml - -about: - home: https://github.com/LCA-ActivityBrowser/activity-browser - license: LGPL3+ - license_family: LGPL - license_file: LICENSE.txt - summary: "{{ os.environ.get('SUMMARY', 'Development version of the Activity Browser') }}" - description: | - The Activity Browser is a graphical user interface for the [brightway2](https://brightway.dev/) - advanced life cycle assessment framework. More details and installation instructions can be found - on [github](https://github.com/LCA-ActivityBrowser/activity-browser). - This is the development version. For the stable release install the `activity-browser` package. 
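Looking back at the `_combine_columns` change in PATCH 24 above, a short sketch may clarify the type change for the combined (combinatoric) scenario columns. This is editorial and uses made-up scenario names:

```python
import itertools
import pandas as pd

# Hypothetical scenario columns taken from two scenario difference files
cols = [["S1", "S2"], ["A", "B"]]

# Before the patch: a MultiIndex of tuples
before = pd.MultiIndex.from_tuples(list(itertools.product(*cols)))
# [('S1', 'A'), ('S1', 'B'), ('S2', 'A'), ('S2', 'B')]

# After the patch: a flat Index of stringified combinations
after = pd.Index([str(c) for c in itertools.product(*cols)])
# ["('S1', 'A')", "('S1', 'B')", "('S2', 'A')", "('S2', 'B')"]
```

This also explains the accompanying boolean-test change in `combine`: with string labels, `col_two in idx` is a substring check on the stringified label, replacing the former set-membership test `col_two in set(idx)` on a tuple.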
From 9addfb892b34e529ed2bd9fb542d02485e2e4590 Mon Sep 17 00:00:00 2001
From: Adrian Haas <11636405+haasad@users.noreply.github.com>
Date: Mon, 30 Oct 2023 08:10:37 +0100
Subject: [PATCH 30/72] Install canary updates (#1093)

* Increase timeout to 30 min

Originally the idea was that installation should never take longer than 12 min, otherwise we should get a warning with the failed pipeline. But unfortunately conda currently takes longer than 12 minutes to solve the environment.

* Add mamba install canary

* Download artifacts to start implementing env comparison

* Looks like on linux it can take more than 30 minutes :-(

* 60min not enough on linux, increasing to 120

* Completely remove timeout, default is 6 hours

* Use solver libmamba option

* Add diff step to compare installations

* Use node16 actions

* Re-add the 12 timeout

* Split diff into separate steps

* Yq action only runs on linux

* debug yq formatting step

* Simplify

* more fighting with yq action

* artifacts are apparently directories in this case

* Try again with while loop

* Run on all os, but only 3.9

* ignore diff exit code

* Run canary install for 3.8 and 3.9 again

---------

Co-authored-by: haasad
---
 .github/workflows/install-canary.yaml | 65 ++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/install-canary.yaml b/.github/workflows/install-canary.yaml
index 1222e6a13..f00197b25 100644
--- a/.github/workflows/install-canary.yaml
+++ b/.github/workflows/install-canary.yaml
@@ -9,8 +9,8 @@ on:
 
 jobs:
   canary-installs:
-    runs-on: ${{ matrix.os }}
     timeout-minutes: 12
+    runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest, macos-latest]
         python-version: ['3.8', '3.9']
     defaults:
       run:
         shell: bash -l {0}
     steps:
       - name: Setup python ${{ matrix.python-version }} conda environment
         uses: conda-incubator/setup-miniconda@v2
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install activity-browser
         run: |
-          conda create -y -n ab -c conda-forge activity-browser python=${{ matrix.python-version }}
+          conda create -y -n ab -c conda-forge --solver libmamba activity-browser python=${{ matrix.python-version }}
       - name: Environment info
         run: |
           conda activate ab
           conda list
           conda env export
           conda env export -f env.yaml
       - name: Upload final environment as artifact
         uses: actions/upload-artifact@v3
         with:
           name: env-${{ matrix.os }}-${{ matrix.python-version }}
           path: env.yaml
+
+  # also run install with micromamba instead of conda to have a timing comparison
+  canary-installs-mamba:
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+        python-version: ['3.9']
+    defaults:
+      run:
+        shell: bash -l {0}
+    steps:
+      - name: Setup python ${{ matrix.python-version }} conda environment
+        uses: mamba-org/setup-micromamba@v1
+        with:
+          environment-name: ab
+          create-args: >-
+            python=${{ matrix.python-version }}
+            activity-browser
+      - name: Environment info
+        run: |
+          micromamba list
+          micromamba env export
+          micromamba env export > env.yaml
+      - name: Upload final environment as artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: env-${{ matrix.os }}-${{ matrix.python-version }}-mamba
+          path: env.yaml
+
+  conda-micromamba-comparison:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        shell: bash -l {0}
+    needs:
+      - canary-installs
+      - canary-installs-mamba
+    steps:
+      - name: Download all artifacts
+        uses: actions/download-artifact@v3
+      - name: show files
+        run: |
+          ls -la
+      - name: correct yaml formatting
+        # add correct indentation to make diffing possible
+        uses: mikefarah/yq@master
+        with:
+          cmd: |
+            ls | grep mamba | while read d; do yq -i $d/env.yaml; done
+      - name: diff ubuntu
+        run: |
+          diff -u env-ubuntu-latest-3.9* || :
+      - name: diff windows
+        run: |
+          diff -u 
env-windows-latest-3.9* || : + - name: diff macos + run: | + diff -u env-macos-latest-3.9* || : From 2165bb6913059e1198582e4e64d0c515ff2af37c Mon Sep 17 00:00:00 2001 From: bsteubing <33026150+bsteubing@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:48:17 +0100 Subject: [PATCH 31/72] Adding error message if no scenario file is loaded in a scenario LCA (#1085) --- activity_browser/bwutils/superstructure/mlca.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/activity_browser/bwutils/superstructure/mlca.py b/activity_browser/bwutils/superstructure/mlca.py index 9b2c21015..a3f0937ff 100644 --- a/activity_browser/bwutils/superstructure/mlca.py +++ b/activity_browser/bwutils/superstructure/mlca.py @@ -27,6 +27,8 @@ class SuperstructureMLCA(MLCA): } def __init__(self, cs_name: str, df: pd.DataFrame): + assert isinstance(df, pd.DataFrame), "Check if you have provided at least 1 reference flow, 1 impact category " \ + "and 1 scenario file. " assert not df.empty, "Cannot run analysis without data." self.scenario_names = scenario_names_from_df(df) self.total = len(self.scenario_names) From 3f1638cc00cffc1f66e0d88c067a0e7804388f74 Mon Sep 17 00:00:00 2001 From: marc-vdm Date: Sat, 21 Oct 2023 13:32:48 +0200 Subject: [PATCH 32/72] Updated and improved contributing file --- .github/PULL_REQUEST_TEMPLATE.md | 7 +- CONTRIBUTING.md | 343 ++++++++++++++++++------------- 2 files changed, 204 insertions(+), 146 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index b838bb2eb..3ea00f6fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -7,7 +7,8 @@ Contributors guide: ./CONTRIBUTING.md ## Checklist - [ ] Keep pull requests small so they can be easily reviewed. @@ -16,7 +17,9 @@ Remove items that do not apply. For completed items, change [ ] to [x]. - [ ] Categorize the PR by setting a good title and adding one of the labels: `bug`, `feature`, `ui`, `change`, `documentation`, `breaking`, `ci` as they show up in the changelog -- [ ] Link this PR to related issues. +- [ ] Link this PR to related issues by using + [closing keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue). +- [ ] Request a review from another developer.
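The assert added in PATCH 31 above turns an otherwise cryptic downstream failure into a readable hint. A minimal editorial sketch of the guarded entry point follows; the function name and the data are invented:

```python
import pandas as pd

def start_scenario_lca(cs_name: str, df):
    # Without a loaded scenario file, the calling code may hand over
    # something that is not a DataFrame (e.g. None); fail early and clearly.
    assert isinstance(df, pd.DataFrame), (
        "Check if you have provided at least 1 reference flow, "
        "1 impact category and 1 scenario file."
    )
    assert not df.empty, "Cannot run analysis without data."
    # ... the actual SuperstructureMLCA initialisation would continue here ...

start_scenario_lca("my setup", pd.DataFrame({"scenario 1": [1.0]}))  # passes
# start_scenario_lca("my setup", None)  # raises the readable AssertionError
```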