From f691bf706e6c4f10da4e130cf1d14c4aac2c4b82 Mon Sep 17 00:00:00 2001 From: Jose Gallego-Posada Date: Sat, 19 Mar 2022 04:06:05 -0400 Subject: [PATCH] Correct installation path. Build and test badge. (#14) - Fixes installation path in README (#12) - Adds build and test workflow plus README badge - Fixes linter issues --- .github/ISSUE_TEMPLATE.md | 15 ---------- .github/workflows/build.yml | 48 ++++++++++---------------------- README.md | 3 +- cooper/constrained_optimizer.py | 12 ++++---- cooper/lagrangian_formulation.py | 27 ++++++++++++------ cooper/optim.py | 16 +++++------ cooper/problem.py | 8 +++--- cooper/state_logger.py | 12 +++++--- setup.cfg | 2 ++ tests/helpers/testing_utils.py | 3 +- tests/helpers/toy_2d_problem.py | 3 +- tox.ini | 42 ++++++++++++++++++---------- 12 files changed, 96 insertions(+), 95 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 485e23d7..00000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,15 +0,0 @@ -* Constrained Optimization in Pytorch version: -* Python version: -* Operating System: - -### Description - -Describe what you were trying to get done. -Tell us what happened, what went wrong, and what you expected to happen. - -### What I Did - -``` -Paste the command(s) you ran and the output. -If there was a crash, please include the traceback here. -``` diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 541bb9aa..3019ba83 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,19 +1,20 @@ -name: build +name: build and test on: + workflow_dispatch: push: - branches: [master, dev] + branches: [master, dev, build-workflow] pull_request: branches: ["*"] jobs: build: - runs-on: ${{ matrix.os }} + runs-on: ${{ matrix.platform }} strategy: matrix: - python-version: [3.8] - os: [ubuntu-latest, macos-latest, windows-latest] - # No gpu workflow: https://github.com/apache/singa/issues/802 + platform: [ubuntu-latest, macos-latest, windows-latest] + python-version: ['3.8', '3.9'] + # No gpu workflow yet!: https://github.com/apache/singa/issues/802 steps: - uses: actions/checkout@v2 @@ -22,31 +23,12 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Install dependencies others - if: ${{ matrix.os != 'windows-latest' }} + - name: Install dependencies run: | - python -m pip install --upgrade pip - pip install .[dev] - # Windows is treated differently, as PyTorch is not uploaded to Pypi atm - - name: Install dependencies windows - if: ${{ matrix.os == 'windows-latest' }} - run: | - python -m pip install --upgrade pip - pip install torch==1.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - pip install .[dev] - - name: Lint with flake8 - run: | - # stop the build if there are Python syntax errors or undefined in Cooper folder (ignoring tutorials) - flake8 --ignore=E203,E266,E501 cooper --count --exit-zero --max-doc-length=80 --max-line-length=88 --statistics - # Uncomment below to apply flake8 to tutorials - # flake8 --ignore=E203,E266,E501 cooper --count --exit-zero --max-doc-length=80 --max-line-length=88 --statistics - - name: Lint with Black - run: | - black --check --diff . - - name: Sort imports with isort - run: | - isort cooper - isort tutorials - - name: Test with pytest - run: | - pytest . 
+ python -m pip install --upgrade pip + python -m pip install tox tox-gh-actions + + - name: Test with tox + run: tox + env: + PLATFORM: ${{ matrix.platform }} diff --git a/README.md b/README.md index 23968bc8..6ff58071 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/cooper-org/cooper/tree/master/LICENSE) [![DOCS](https://readthedocs.org/projects/cooper/badge/?version=latest)](https://cooper.readthedocs.io/en/latest/?version=latest) +[![Build and Test](https://github.com/cooper-org/cooper/actions/workflows/build.yml/badge.svg)](https://github.com/cooper-org/cooper/actions/workflows/build.yml) [![Codecov](https://codecov.io/gh/cooper-org/cooper/branch/dev/graph/badge.svg?token=1AKM2EQ7RT)](https://codecov.io/gh/cooper-org/cooper/branch/dev/graph/badge.svg?token=1AKM2EQ7RT) ## About @@ -93,7 +94,7 @@ for iter_num in range(5000): ### Basic Installation ```bash -pip install git@github.com:cooper-org/cooper.git#egg=cooper +pip install git+https://github.com/cooper-org/cooper.git ``` ### Development Installation diff --git a/cooper/constrained_optimizer.py b/cooper/constrained_optimizer.py index b82af908..3a7d8c0c 100644 --- a/cooper/constrained_optimizer.py +++ b/cooper/constrained_optimizer.py @@ -15,7 +15,7 @@ from .problem import CMPState, Formulation -class ConstrainedOptimizer(torch.optim.Optimizer): +class ConstrainedOptimizer: """ Optimizes a :py:class:`~cooper.problem.ConstrainedMinimizationProblem` given its :py:class:`~cooper.problem.Formulation`. @@ -69,8 +69,6 @@ def __init__( self.alternating = alternating self.dual_restarts = dual_restarts - super().__init__(self.primal_optimizer.param_groups, {}) - self.sanity_checks() def sanity_checks(self): @@ -166,6 +164,7 @@ def step( """ if self.cmp.is_constrained and not hasattr(self.dual_optimizer, "param_groups"): + assert self.dual_optimizer is not None and callable(self.dual_optimizer) # Checks if needed and instantiates dual_optimizer self.dual_optimizer = self.dual_optimizer(self.formulation.dual_parameters) @@ -174,7 +173,7 @@ def step( if self.is_extrapolation: # Store parameter copy and compute t+1/2 iterates - self.primal_optimizer.extrapolation() + self.primal_optimizer.extrapolation() # type: ignore if self.cmp.is_constrained: # Call to dual_step flips sign of gradients, then triggers call # to dual_optimizer.extrapolation and projects dual variables @@ -188,7 +187,7 @@ def step( # extrapolation step lagrangian = self.formulation.composite_objective( closure, *closure_args, **closure_kwargs - ) + ) # type: ignore # Populate gradients at extrapolation point self.formulation.custom_backward(lagrangian) @@ -212,8 +211,9 @@ def step( # Skip gradient wrt model parameters to avoid wasteful # computation, as we only need gradient wrt multipliers. with torch.no_grad(): + assert closure is not None self.cmp.state = closure(*closure_args, **closure_kwargs) - lagrangian = self.formulation.get_composite_objective(self.cmp) + lagrangian = self.formulation.composite_objective(self.cmp) # type: ignore # Zero-out gradients for dual variables since they were # already populated earlier. 
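Taken together, the `ConstrainedOptimizer` changes above (no longer subclassing `torch.optim.Optimizer`, dual optimizer instantiated lazily inside `step`) imply a training loop shaped roughly like the sketch below. This is a minimal, unverified sketch: the toy problem is modeled on `tests/helpers/toy_2d_problem.py`, while the constructor keyword names and the `zero_grad` helper are assumptions inferred from the attributes set in `__init__`, since neither appears in the hunks above.

```python
import torch

from cooper.constrained_optimizer import ConstrainedOptimizer
from cooper.lagrangian_formulation import LagrangianFormulation
from cooper.optim import partial
from cooper.problem import CMPState, ConstrainedMinimizationProblem


class ToyCMP(ConstrainedMinimizationProblem):
    """Minimize x^2 + 2*y^2 subject to x + y >= 1 (defect convention g(x) <= 0)."""

    def __init__(self):
        super().__init__(is_constrained=True)

    def closure(self, params):
        x, y = params[0], params[1]
        loss = x ** 2 + 2 * y ** 2
        ineq_defect = 1.0 - x - y  # x + y >= 1
        return CMPState(loss=loss, ineq_defect=ineq_defect)


params = torch.nn.Parameter(torch.tensor([0.0, -1.0]))
cmp = ToyCMP()
formulation = LagrangianFormulation(cmp)

primal_optimizer = torch.optim.SGD([params], lr=5e-2)
# Passed partially instantiated: the multipliers it will update only exist
# after the first CMPState is seen, so `step` finishes the instantiation.
dual_optimizer = partial(torch.optim.SGD, lr=5e-2)

coop = ConstrainedOptimizer(
    formulation=formulation,  # keyword names assumed, not shown in the hunks
    primal_optimizer=primal_optimizer,
    dual_optimizer=dual_optimizer,
)

for _ in range(500):
    coop.zero_grad()  # assumed helper; zeroes primal (and dual) gradients
    lagrangian = formulation.composite_objective(cmp.closure, params)
    formulation.custom_backward(lagrangian)
    coop.step(cmp.closure, params)
```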
diff --git a/cooper/lagrangian_formulation.py b/cooper/lagrangian_formulation.py index 47cac54e..cc346f98 100644 --- a/cooper/lagrangian_formulation.py +++ b/cooper/lagrangian_formulation.py @@ -1,7 +1,7 @@ """Lagrangian formulation""" import abc -from typing import List, Optional, Tuple, Union +from typing import Callable, List, Optional, Tuple, Union, no_type_check import torch @@ -59,10 +59,17 @@ def state(self) -> Tuple[Union[None, torch.Tensor]]: :py:class:`torch.Tensor` values. Note that the *values* are a different type from the :py:class:`cooper.multipliers.DenseMultiplier` objects. """ - ineq_state = None if self.ineq_multipliers is None else self.ineq_multipliers() - eq_state = None if self.eq_multipliers is None else self.eq_multipliers() + if self.ineq_multipliers is None: + ineq_state = None + else: + ineq_state = self.ineq_multipliers() + + if self.eq_multipliers is None: + eq_state = None + else: + eq_state = self.eq_multipliers() - return ineq_state, eq_state + return ineq_state, eq_state # type: ignore def create_state(self, cmp_state): """Initialize dual variables and optimizers given list of equality and @@ -158,7 +165,7 @@ def weighted_violation( if not has_defect: # We should always have at least the regular defects, if not, then # the problem instance does not have `constraint_type` constraints - proxy_violation = 0.0 + proxy_violation = torch.tensor([0.0]) else: multipliers = getattr(self, constraint_type + "_multipliers")() @@ -188,9 +195,10 @@ class LagrangianFormulation(BaseLagrangianFormulation): aug_lag_coefficient: Coefficient used for the augmented Lagrangian. """ + @no_type_check def composite_objective( self, - closure: callable, + closure: Callable[..., CMPState], *closure_args, write_state: bool = True, **closure_kwargs @@ -262,23 +270,26 @@ def composite_objective( # TODO(JGP): [2] I guess one would like to filter based on # non-proxy feasibility but penalize based on the proxy defect if ineq_defect is not None: + assert self.ineq_multipliers is not None ineq_filter = (ineq_defect >= 0) + (self.ineq_multipliers() > 0) ineq_square = torch.sum(torch.square(ineq_defect[ineq_filter])) else: - ineq_square = 0.0 + ineq_square = torch.tensor([0.0]) if eq_defect is not None: eq_square = torch.sum(torch.square(eq_defect)) else: - eq_square = 0.0 + eq_square = torch.tensor([0.0]) lagrangian += self.aug_lag_coefficient * (ineq_square + eq_square) else: + assert cmp_state.loss is not None lagrangian = cmp_state.loss return lagrangian + @no_type_check def _populate_gradients( self, lagrangian: torch.Tensor, ignore_primal: bool = False ): diff --git a/cooper/optim.py b/cooper/optim.py index d444f14f..c3cd729b 100644 --- a/cooper/optim.py +++ b/cooper/optim.py @@ -2,7 +2,7 @@ import functools import math from collections.abc import Iterable -from typing import Tuple, Type +from typing import Callable, List, Tuple, Type, no_type_check import torch @@ -13,6 +13,7 @@ RMSprop = torch.optim.RMSprop +@no_type_check def partial(optim_cls: Type[torch.optim.Optimizer], **optim_kwargs): """ Partially instantiates an optimizer class. This approach is preferred over @@ -60,8 +61,6 @@ class PartialOptimizer(optim_cls): # written by Hugo Berard (berard.hugo@gmail.com) while at Facebook. -required = object() - class ExtragradientOptimizer(torch.optim.Optimizer): """Base class for optimizers with extrapolation step. 
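The `required` sentinel removed above was only used as a default for `ExtraSGD`'s `lr`. For reference, every subclass of `ExtragradientOptimizer` follows the same two-phase protocol: `extrapolation()` moves to the look-ahead point, then `step()` updates the original iterate with the look-ahead gradient. Below is a toy sketch of that protocol on the bilinear game min_x max_y xy, where plain simultaneous gradient descent-ascent cycles; the game, learning rate, and sign handling are illustrative only.

```python
import torch

from cooper.optim import ExtraSGD  # defined further down in this file

x = torch.nn.Parameter(torch.tensor(1.0))  # minimizing player
y = torch.nn.Parameter(torch.tensor(1.0))  # maximizing player
opt_x = ExtraSGD([x], lr=0.2)
opt_y = ExtraSGD([y], lr=0.2)

for _ in range(500):
    # Phase 1: gradients at the current point, then move to the t + 1/2 iterate.
    opt_x.zero_grad()
    opt_y.zero_grad()
    loss = x * y
    loss.backward()
    y.grad.mul_(-1)  # flip sign so the descent update ascends in y
    opt_x.extrapolation()
    opt_y.extrapolation()

    # Phase 2: gradients at the look-ahead point, applied to the original iterate.
    opt_x.zero_grad()
    opt_y.zero_grad()
    loss = x * y
    loss.backward()
    y.grad.mul_(-1)
    opt_x.step()
    opt_y.step()

print(x.item(), y.item())  # both decay toward 0, the saddle point
```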
@@ -75,7 +74,7 @@ class ExtragradientOptimizer(torch.optim.Optimizer):
 
     def __init__(self, params: Iterable, defaults: dict):
         super(ExtragradientOptimizer, self).__init__(params, defaults)
-        self.params_copy = []
+        self.params_copy: List[torch.nn.Parameter] = []
 
     def update(self, p, group):
         raise NotImplementedError
@@ -101,7 +100,7 @@ def extrapolation(self):
             # Update the current parameters
             p.data.add_(u)
 
-    def step(self, closure: callable = None):
+    def step(self, closure: Callable = None):
         """Performs a single optimization step.
 
         Args:
@@ -148,7 +147,8 @@ class ExtraSGD(ExtragradientOptimizer):
 
     .. note::
         The implementation of SGD with Momentum/Nesterov subtly differs from
-        :cite:t:`sutskever2013initialization`. and implementations in some other frameworks.
+        :cite:t:`sutskever2013initialization` and implementations in some other
+        frameworks.
 
     Considering the specific case of Momentum, the update can be written as
 
@@ -172,13 +172,13 @@ class ExtraSGD(ExtragradientOptimizer):
     def __init__(
         self,
         params: Iterable,
-        lr: float = required,
+        lr: float,
         momentum: float = 0,
         dampening: float = 0,
         weight_decay: float = 0,
         nesterov: bool = False,
     ):
-        if lr is not required and lr < 0.0:
+        if lr is None or lr < 0.0:
             raise ValueError("Invalid learning rate: {}".format(lr))
         if momentum < 0.0:
             raise ValueError("Invalid momentum value: {}".format(momentum))
diff --git a/cooper/problem.py b/cooper/problem.py
index e86c85af..870b1a63 100644
--- a/cooper/problem.py
+++ b/cooper/problem.py
@@ -57,7 +57,7 @@ class ConstrainedMinimizationProblem(abc.ABC):
 
     def __init__(self, is_constrained: bool = False):
         self.is_constrained = is_constrained
-        self._state = None
+        self._state = CMPState()
 
     @property
     def state(self) -> CMPState:
@@ -122,12 +122,12 @@ def composite_objective(self):
         pass
 
     @abc.abstractmethod
-    def _populate_gradients(self):
+    def _populate_gradients(self, *args, **kwargs):
         """Performs the actual backward computation and populates the
         gradients for the trainable parameters and the dual variables."""
         pass
 
-    def custom_backward(self, lagrangian: torch.Tensor):
+    def custom_backward(self, *args, **kwargs):
         """Alias for :py:meth:`._populate_gradients` to keep the ``backward``
         naming convention used in Pytorch. We avoid naming this method
         ``backward`` as it is a method of the ``LagrangianFormulation`` object
@@ -137,4 +137,4 @@ def custom_backward(self, lagrangian: torch.Tensor):
         lagrangian: Value of the computed Lagrangian based on which the
             gradients for the primal and dual variables are populated.
         """
-        self._populate_gradients(lagrangian)
+        self._populate_gradients(*args, **kwargs)
diff --git a/cooper/state_logger.py b/cooper/state_logger.py
index 1264702d..c23621c5 100644
--- a/cooper/state_logger.py
+++ b/cooper/state_logger.py
@@ -17,11 +17,14 @@ class StateLogger:
     """
 
     def __init__(self, save_metrics: List[str]):
-        self.logger = OrderedDict()
+        self.logger: OrderedDict = OrderedDict()
         self.save_metrics = save_metrics
 
     def store_metrics(
-        self, formulation: Formulation, step_id: int, partial_dict: dict = None
+        self,
+        formulation: Formulation,
+        step_id: int,
+        partial_dict: dict = None,
     ):
         """
         Store a new snapshot of the metrics.
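With `partial_dict` now optional, `store_metrics` can be called with or without extra user-supplied metrics. A rough usage sketch, continuing the toy loop from the `ConstrainedOptimizer` example above; the metric names are assumptions mirroring the branches visible in this file (only the `eq_multipliers` branch is shown in the hunk below).

```python
from cooper.state_logger import StateLogger

# Metric names assumed to mirror CMPState fields and multiplier getters.
state_logger = StateLogger(save_metrics=["loss", "ineq_defect", "ineq_multipliers"])

for step_id in range(500):
    coop.zero_grad()
    lagrangian = formulation.composite_objective(cmp.closure, params)
    formulation.custom_backward(lagrangian)
    coop.step(cmp.closure, params)

    if step_id % 100 == 0:
        # partial_dict is optional now; when given, its keys are merged into
        # save_metrics and its values stored alongside the snapshot.
        state_logger.store_metrics(formulation, step_id, partial_dict={"lr": 5e-2})

# Snapshots are keyed by step_id in the internal OrderedDict.
print(state_logger.logger[0]["loss"])
```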
@@ -48,8 +51,9 @@ def store_metrics(
             elif metric == "eq_multipliers":
                 aux_dict[metric] = deepcopy(formulation.state()[1].data)
 
-        aux_dict.update(partial_dict)
-        self.save_metrics = list(set(self.save_metrics + list(partial_dict.keys())))
+        if partial_dict is not None:
+            aux_dict.update(partial_dict)
+            self.save_metrics = list(set(self.save_metrics + list(partial_dict.keys())))
 
         self.logger[step_id] = aux_dict
 
diff --git a/setup.cfg b/setup.cfg
index c926e7b0..d062eb73 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -55,6 +55,7 @@ dev =
     numpy>=1.21.0
     pre-commit>=2.16.0
     pytest>=6.2.5
+    pytest-cov>=3.0.0
     twine>=3.8.0
     tox>=3.14.0
 docs =
@@ -80,6 +81,7 @@ tests =
     numpy==1.22.0
     pre-commit==2.16.0
    pytest==6.2.5
+    pytest-cov==3.0.0
     tox==3.14.0
 examples =
     matplotlib>=3.5.0
diff --git a/tests/helpers/testing_utils.py b/tests/helpers/testing_utils.py
index d31ab38b..b0d51a1f 100644
--- a/tests/helpers/testing_utils.py
+++ b/tests/helpers/testing_utils.py
@@ -20,7 +20,8 @@ def get_device_skip(aim_device, cuda_available):
             device = "cuda"
             skip = SkipTest(do_skip=False)
         else:
-            # # Intended to test GPU execution, but GPU not available. Skipping test.
+            # Intended to test GPU execution, but GPU not available. Skipping
+            # test.
             device = None
             skip = SkipTest(do_skip=True, skip_reason="CUDA is not available")
     else:
diff --git a/tests/helpers/toy_2d_problem.py b/tests/helpers/toy_2d_problem.py
index 3cc0466b..e67ab40a 100644
--- a/tests/helpers/toy_2d_problem.py
+++ b/tests/helpers/toy_2d_problem.py
@@ -30,7 +30,8 @@ def closure(self, params):
         )
 
         if self.use_proxy_ineq:
-            # Using **slightly** different functions for the proxy constraints
+            # Using **slightly** different functions for the proxy
+            # constraints
             proxy_ineq_defect = torch.stack(
                 [
                     -0.9 * param_x - param_y + 1.0,  # x + y \ge 1
diff --git a/tox.ini b/tox.ini
index 851f87f4..b22196cf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,25 +1,39 @@
 [tox]
-envlist = py36, py37, py38, flake8
+minversion = 3.8.0
+envlist = py{38,39}-torch{18,19,110}-{linux,macos,windows}, lint, mypy
+isolated_build = true
 
-[travis]
+[gh-actions]
 python =
-    3.8: py38
-    3.7: py37
-    3.6: py36
+    3.8: py38, lint, mypy
+    3.9: py39
+
+[gh-actions:env]
+PLATFORM =
+    ubuntu-latest: linux
+    macos-latest: macos
+    windows-latest: windows
 
-[testenv:flake8]
-basepython = python
-deps = flake8
-commands = flake8 cooper tests
-
 [testenv]
 setenv =
     PYTHONPATH = {toxinidir}
+extras = tests
 deps =
-    -r{toxinidir}/requirements_dev.txt
-; If you want to make tox run the tests with the same versions, create a
-; requirements.txt with the pinned versions and uncomment the following line:
-;     -r{toxinidir}/requirements.txt
+    torch18: torch >= 1.8.0, < 1.9.0
+    torch19: torch >= 1.9.0, < 1.10.0
+    torch110: torch >= 1.10.0, < 1.11.0
 commands =
-    pip install -U pip
     pytest --basetemp={envtmpdir}
+
+[testenv:lint]
+basepython = python3.8
+extras = dev
+commands =
+    flake8 --ignore=E203,E266,E501 cooper --count --exit-zero --max-doc-length=80 --max-line-length=88 --per-file-ignores=__init__.py:F401 --statistics
+    black --check --diff .
+    isort cooper
+    isort tutorials
+
+[testenv:mypy]
+basepython = python3.8
+commands = mypy cooper
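To reproduce locally what the `[gh-actions]` and `[gh-actions:env]` tables select on CI, the generated environments can be invoked by name. A small sketch for Python 3.8 on Linux; the environment names are taken from the envlist above, and tox (>= 3.8.0, per `minversion`) is assumed to be installed.

```python
import subprocess

# The [gh-actions] table maps Python 3.8 to py38 + lint + mypy, and
# PLATFORM=linux selects the -linux factor from the generative envlist.
for env in ["py38-torch110-linux", "lint", "mypy"]:
    subprocess.run(["tox", "-e", env], check=True)
```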