Correct installation path. Build and test badge. (#14)
- Fixes installation path in README (#12)
- Adds build and test workflow plus README badge
- Fixes linter issues
gallego-posada authored Mar 19, 2022
1 parent 562f64b commit f691bf7
Showing 12 changed files with 96 additions and 95 deletions.
15 changes: 0 additions & 15 deletions .github/ISSUE_TEMPLATE.md

This file was deleted.

48 changes: 15 additions & 33 deletions .github/workflows/build.yml
@@ -1,19 +1,20 @@
name: build
name: build and test

on:
workflow_dispatch:
push:
branches: [master, dev]
branches: [master, dev, build-workflow]
pull_request:
branches: ["*"]

jobs:
build:
runs-on: ${{ matrix.os }}
runs-on: ${{ matrix.platform }}
strategy:
matrix:
python-version: [3.8]
os: [ubuntu-latest, macos-latest, windows-latest]
# No gpu workflow: https://github.com/apache/singa/issues/802
platform: [ubuntu-latest, macos-latest, windows-latest]
python-version: ['3.8', '3.9']
# No gpu workflow yet!: https://github.com/apache/singa/issues/802

steps:
- uses: actions/checkout@v2
@@ -22,31 +23,12 @@ jobs:
with:
python-version: ${{ matrix.python-version }}

- name: Install dependencies others
if: ${{ matrix.os != 'windows-latest' }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[dev]
# Windows is treated differently, as PyTorch is not uploaded to Pypi atm
- name: Install dependencies windows
if: ${{ matrix.os == 'windows-latest' }}
run: |
python -m pip install --upgrade pip
pip install torch==1.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install .[dev]
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined in Cooper folder (ignoring tutorials)
flake8 --ignore=E203,E266,E501 cooper --count --exit-zero --max-doc-length=80 --max-line-length=88 --statistics
# Uncomment below to apply flake8 to tutorials
# flake8 --ignore=E203,E266,E501 cooper --count --exit-zero --max-doc-length=80 --max-line-length=88 --statistics
- name: Lint with Black
run: |
black --check --diff .
- name: Sort imports with isort
run: |
isort cooper
isort tutorials
- name: Test with pytest
run: |
pytest .
python -m pip install --upgrade pip
python -m pip install tox tox-gh-actions
- name: Test with tox
run: tox
env:
PLATFORM: ${{ matrix.platform }}
3 changes: 2 additions & 1 deletion README.md
@@ -2,6 +2,7 @@

[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/cooper-org/cooper/tree/master/LICENSE)
[![DOCS](https://readthedocs.org/projects/cooper/badge/?version=latest)](https://cooper.readthedocs.io/en/latest/?version=latest)
[![Build and Test](https://github.com/cooper-org/cooper/actions/workflows/build.yml/badge.svg)](https://github.com/cooper-org/cooper/actions/workflows/build.yml)
[![Codecov](https://codecov.io/gh/cooper-org/cooper/branch/dev/graph/badge.svg?token=1AKM2EQ7RT)](https://codecov.io/gh/cooper-org/cooper/branch/dev/graph/badge.svg?token=1AKM2EQ7RT)

## About
@@ -93,7 +94,7 @@ for iter_num in range(5000):
### Basic Installation

```bash
pip install git@github.com:cooper-org/cooper.git#egg=cooper
pip install git+https://github.com/cooper-org/cooper.git
```

### Development Installation
12 changes: 6 additions & 6 deletions cooper/constrained_optimizer.py
@@ -15,7 +15,7 @@
from .problem import CMPState, Formulation


class ConstrainedOptimizer(torch.optim.Optimizer):
class ConstrainedOptimizer:
"""
Optimizes a :py:class:`~cooper.problem.ConstrainedMinimizationProblem`
given its :py:class:`~cooper.problem.Formulation`.
@@ -69,8 +69,6 @@ def __init__(
self.alternating = alternating
self.dual_restarts = dual_restarts

super().__init__(self.primal_optimizer.param_groups, {})

self.sanity_checks()

def sanity_checks(self):
@@ -166,6 +164,7 @@ def step(
"""

if self.cmp.is_constrained and not hasattr(self.dual_optimizer, "param_groups"):
assert self.dual_optimizer is not None and callable(self.dual_optimizer)
# Checks if needed and instantiates dual_optimizer
self.dual_optimizer = self.dual_optimizer(self.formulation.dual_parameters)

@@ -174,7 +173,7 @@

if self.is_extrapolation:
# Store parameter copy and compute t+1/2 iterates
self.primal_optimizer.extrapolation()
self.primal_optimizer.extrapolation() # type: ignore
if self.cmp.is_constrained:
# Call to dual_step flips sign of gradients, then triggers call
# to dual_optimizer.extrapolation and projects dual variables
@@ -188,7 +187,7 @@
# extrapolation step
lagrangian = self.formulation.composite_objective(
closure, *closure_args, **closure_kwargs
)
) # type: ignore

# Populate gradients at extrapolation point
self.formulation.custom_backward(lagrangian)
@@ -212,8 +211,9 @@
# Skip gradient wrt model parameters to avoid wasteful
# computation, as we only need gradient wrt multipliers.
with torch.no_grad():
assert closure is not None
self.cmp.state = closure(*closure_args, **closure_kwargs)
lagrangian = self.formulation.get_composite_objective(self.cmp)
lagrangian = self.formulation.composite_objective(self.cmp) # type: ignore

# Zero-out gradients for dual variables since they were
# already populated earlier.
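The new assert in `step()` guards the lazy instantiation of the dual optimizer: it is passed in as a callable with its hyperparameters already bound and is only constructed once the dual variables exist. A minimal sketch of that pattern, using `functools.partial` as a stand-in for the `cooper.optim.partial` helper touched later in this commit (values are illustrative, not taken from the codebase):

```python
import torch
from functools import partial

# Sketch of the lazy instantiation guarded by the new assert; functools.partial
# stands in for cooper.optim.partial and the hyperparameters are illustrative.
dual_optimizer = partial(torch.optim.SGD, lr=1e-2)  # still a callable, not an optimizer

# ... later, once the formulation has created its multipliers ...
dual_parameters = [torch.nn.Parameter(torch.zeros(3))]  # stand-in for formulation.dual_parameters
assert dual_optimizer is not None and callable(dual_optimizer)
dual_optimizer = dual_optimizer(dual_parameters)  # now a real torch.optim.SGD instance
print(type(dual_optimizer).__name__)  # SGD
```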
27 changes: 19 additions & 8 deletions cooper/lagrangian_formulation.py
@@ -1,7 +1,7 @@
"""Lagrangian formulation"""

import abc
from typing import List, Optional, Tuple, Union
from typing import Callable, List, Optional, Tuple, Union, no_type_check

import torch

@@ -59,10 +59,17 @@ def state(self) -> Tuple[Union[None, torch.Tensor]]:
:py:class:`torch.Tensor` values. Note that the *values* are a different
type from the :py:class:`cooper.multipliers.DenseMultiplier` objects.
"""
ineq_state = None if self.ineq_multipliers is None else self.ineq_multipliers()
eq_state = None if self.eq_multipliers is None else self.eq_multipliers()
if self.ineq_multipliers is None:
ineq_state = None
else:
ineq_state = self.ineq_multipliers()

if self.eq_multipliers is None:
eq_state = None
else:
eq_state = self.eq_multipliers()

return ineq_state, eq_state
return ineq_state, eq_state # type: ignore

def create_state(self, cmp_state):
"""Initialize dual variables and optimizers given list of equality and
@@ -158,7 +165,7 @@ def weighted_violation(
if not has_defect:
# We should always have at least the regular defects, if not, then
# the problem instance does not have `constraint_type` constraints
proxy_violation = 0.0
proxy_violation = torch.tensor([0.0])
else:
multipliers = getattr(self, constraint_type + "_multipliers")()

@@ -188,9 +195,10 @@ class LagrangianFormulation(BaseLagrangianFormulation):
aug_lag_coefficient: Coefficient used for the augmented Lagrangian.
"""

@no_type_check
def composite_objective(
self,
closure: callable,
closure: Callable[..., CMPState],
*closure_args,
write_state: bool = True,
**closure_kwargs
@@ -262,23 +270,26 @@
# TODO(JGP): [2] I guess one would like to filter based on
# non-proxy feasibility but penalize based on the proxy defect
if ineq_defect is not None:
assert self.ineq_multipliers is not None
ineq_filter = (ineq_defect >= 0) + (self.ineq_multipliers() > 0)
ineq_square = torch.sum(torch.square(ineq_defect[ineq_filter]))
else:
ineq_square = 0.0
ineq_square = torch.tensor([0.0])

if eq_defect is not None:
eq_square = torch.sum(torch.square(eq_defect))
else:
eq_square = 0.0
eq_square = torch.tensor([0.0])

lagrangian += self.aug_lag_coefficient * (ineq_square + eq_square)

else:
assert cmp_state.loss is not None
lagrangian = cmp_state.loss

return lagrangian

@no_type_check
def _populate_gradients(
self, lagrangian: torch.Tensor, ignore_primal: bool = False
):
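The hunks above replace the plain-float `0.0` placeholders with `torch.tensor([0.0])` and add an assert before reading `self.ineq_multipliers()`. A small self-contained sketch of the augmented-Lagrangian filtering involved, with made-up numbers (not taken from the repository):

```python
import torch

# Toy values: one violated constraint, one satisfied constraint whose
# multiplier is still positive.
ineq_defect = torch.tensor([0.3, -0.1])
ineq_multipliers = torch.tensor([0.0, 0.4])

# Penalize constraints that are violated OR still carry a positive multiplier,
# as in the filter above.
ineq_filter = (ineq_defect >= 0) + (ineq_multipliers > 0)
ineq_square = torch.sum(torch.square(ineq_defect[ineq_filter]))

# With no equality constraints the default stays a Tensor rather than a float,
# which is what the change to torch.tensor([0.0]) preserves.
eq_square = torch.tensor([0.0])

aug_lag_coefficient = 0.1
penalty = aug_lag_coefficient * (ineq_square + eq_square)
print(penalty)  # tensor([0.0100])
```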
16 changes: 8 additions & 8 deletions cooper/optim.py
@@ -2,7 +2,7 @@
import functools
import math
from collections.abc import Iterable
from typing import Tuple, Type
from typing import Callable, List, Tuple, Type, no_type_check

import torch

@@ -13,6 +13,7 @@
RMSprop = torch.optim.RMSprop


@no_type_check
def partial(optim_cls: Type[torch.optim.Optimizer], **optim_kwargs):
"""
Partially instantiates an optimizer class. This approach is preferred over
@@ -60,8 +61,6 @@ class PartialOptimizer(optim_cls):

# written by Hugo Berard ([email protected]) while at Facebook.

required = object()


class ExtragradientOptimizer(torch.optim.Optimizer):
"""Base class for optimizers with extrapolation step.
@@ -75,7 +74,7 @@ class ExtragradientOptimizer(torch.optim.Optimizer):

def __init__(self, params: Iterable, defaults: dict):
super(ExtragradientOptimizer, self).__init__(params, defaults)
self.params_copy = []
self.params_copy: List[torch.nn.Parameter] = []

def update(self, p, group):
raise NotImplementedError
@@ -101,7 +100,7 @@ def extrapolation(self):
# Update the current parameters
p.data.add_(u)

def step(self, closure: callable = None):
def step(self, closure: Callable = None):
"""Performs a single optimization step.
Args:
@@ -148,7 +147,8 @@ class ExtraSGD(ExtragradientOptimizer):
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
:cite:t:`sutskever2013initialization`. and implementations in some other frameworks.
:cite:t:`sutskever2013initialization`. and implementations in some other
frameworks.
Considering the specific case of Momentum, the update can be written as
@@ -172,13 +172,13 @@ class ExtraSGD(ExtragradientOptimizer):
def __init__(
self,
params: Iterable,
lr: float = required,
lr: float,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False,
):
if lr is not required and lr < 0.0:
if lr is None or lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
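With the `required` sentinel gone, `lr` becomes a plain required argument of `ExtraSGD`. A hedged usage sketch of the extrapolation/step protocol implemented by `ExtragradientOptimizer` (the import path mirrors this file's location and the toy objective is an assumption):

```python
import torch
from cooper.optim import ExtraSGD  # import path assumed from cooper/optim.py

# Minimal quadratic objective to exercise the two-phase extragradient update.
w = torch.nn.Parameter(torch.tensor([1.0, -1.0]))
optimizer = ExtraSGD([w], lr=0.1)  # lr is now a required argument

def loss_fn():
    return (w ** 2).sum()

# Look-ahead half step: gradients at the current point, then extrapolation()
# stores a copy of the parameters and moves to the t+1/2 iterate.
optimizer.zero_grad()
loss_fn().backward()
optimizer.extrapolation()

# Actual step: gradients at the extrapolated point, update applied to the
# stored copy of the parameters.
optimizer.zero_grad()
loss_fn().backward()
optimizer.step()
```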
8 changes: 4 additions & 4 deletions cooper/problem.py
@@ -57,7 +57,7 @@ class ConstrainedMinimizationProblem(abc.ABC):

def __init__(self, is_constrained: bool = False):
self.is_constrained = is_constrained
self._state = None
self._state = CMPState()

@property
def state(self) -> CMPState:
@@ -122,12 +122,12 @@ def composite_objective(self):
pass

@abc.abstractmethod
def _populate_gradients(self):
def _populate_gradients(self, *args, **kwargs):
"""Performs the actual backward computation and populates the gradients
for the trainable parameters for the dual variables."""
pass

def custom_backward(self, lagrangian: torch.Tensor):
def custom_backward(self, *args, **kwargs):
"""Alias for :py:meth:`._populate_gradients` to keep the ``backward``
naming convention used in Pytorch. We avoid naming this method
``backward`` as it is a method of the ``LagrangianFormulation`` object
@@ -137,4 +137,4 @@ def custom_backward(self, lagrangian: torch.Tensor):
lagrangian: Value of the computed Lagrangian based on which the
gradients for the primal and dual variables are populated.
"""
self._populate_gradients(lagrangian)
self._populate_gradients(*args, **kwargs)
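The loosened `*args, **kwargs` hooks above are implemented by the Lagrangian formulation earlier in this commit; the problems they consume subclass `ConstrainedMinimizationProblem` and return a `CMPState` from a closure. A hedged sketch of such a minimal problem, with `CMPState` field names inferred from their use elsewhere in this commit (top-level imports are assumptions):

```python
import torch
import cooper  # top-level exports assumed; the classes live in cooper/problem.py

# Hedged sketch of a minimal constrained problem; the exact CMPState
# constructor signature is an assumption based on its usage above.
class ToyProblem(cooper.ConstrainedMinimizationProblem):
    def __init__(self):
        super().__init__(is_constrained=True)

    def closure(self, params: torch.Tensor) -> cooper.CMPState:
        loss = torch.sum(params ** 2)
        ineq_defect = 1.0 - torch.sum(params)  # encodes the constraint sum(params) >= 1
        return cooper.CMPState(loss=loss, ineq_defect=ineq_defect)

cmp = ToyProblem()
state = cmp.closure(torch.tensor([0.4, 0.3]))
print(state.loss, state.ineq_defect)  # tensor(0.2500) tensor(0.3000)
```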
12 changes: 8 additions & 4 deletions cooper/state_logger.py
@@ -17,11 +17,14 @@ class StateLogger:
"""

def __init__(self, save_metrics: List[str]):
self.logger = OrderedDict()
self.logger: OrderedDict = OrderedDict()
self.save_metrics = save_metrics

def store_metrics(
self, formulation: Formulation, step_id: int, partial_dict: dict = None
self,
formulation: Formulation,
step_id: int,
partial_dict: dict = None,
):
"""
Store a new screenshot of the metrics.
@@ -48,8 +51,9 @@ def store_metrics(
elif metric == "eq_multipliers":
aux_dict[metric] = deepcopy(formulation.state()[1].data)

aux_dict.update(partial_dict)
self.save_metrics = list(set(self.save_metrics + list(partial_dict.keys())))
if partial_dict is not None:
aux_dict.update(partial_dict)
self.save_metrics = list(set(self.save_metrics + list(partial_dict.keys())))

self.logger[step_id] = aux_dict

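The `partial_dict` guard above makes that argument genuinely optional. A short usage sketch, where the import path mirrors the file location and `formulation` stands for an already-populated Lagrangian formulation (both are assumptions, so this is a sketch rather than runnable code):

```python
from cooper.state_logger import StateLogger  # import path assumed from this file

# `formulation` is a stand-in for a cooper Formulation whose multipliers exist.
logger = StateLogger(save_metrics=["loss", "ineq_defect"])

logger.store_metrics(formulation, step_id=0)  # no partial_dict: nothing extra stored
logger.store_metrics(formulation, step_id=1, partial_dict={"lr": 1e-3})

print(logger.logger[1]["lr"])  # extra metrics are merged into the stored snapshot
```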
2 changes: 2 additions & 0 deletions setup.cfg
@@ -55,6 +55,7 @@ dev =
numpy>=1.21.0
pre-commit>=2.16.0
pytest>=6.2.5
pytest-cov>=3.0.0
twine>=3.8.0
tox>=3.14.0
docs =
@@ -80,6 +81,7 @@ tests =
numpy==1.22.0
pre-commit==2.16.0
pytest==6.2.5
pytest-cov==3.0.0
tox==3.14.0
examples =
matplotlib>=3.5.0
3 changes: 2 additions & 1 deletion tests/helpers/testing_utils.py
@@ -20,7 +20,8 @@ def get_device_skip(aim_device, cuda_available):
device = "cuda"
skip = SkipTest(do_skip=False)
else:
# # Intended to test GPU execution, but GPU not available. Skipping test.
# Intended to test GPU execution, but GPU not available. Skipping
# test.
device = None
skip = SkipTest(do_skip=True, skip_reason="CUDA is not available")
else:
3 changes: 2 additions & 1 deletion tests/helpers/toy_2d_problem.py
@@ -30,7 +30,8 @@ def closure(self, params):
)

if self.use_proxy_ineq:
# Using **slightly** different functions for the proxy constraints
# Using **slightly** different functions for the proxy
# constraints
proxy_ineq_defect = torch.stack(
[
-0.9 * param_x - param_y + 1.0, # x + y \ge 1
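The rewrapped comment above refers to the proxy constraints of the toy 2D problem. A small numeric illustration of how the proxy defect differs from the true defect for the constraint x + y >= 1 (values are made up):

```python
import torch

# The true constraint encodes x + y >= 1; the proxy used for gradient
# computation rescales x by 0.9, as in the hunk above.
param_x, param_y = torch.tensor(0.4), torch.tensor(0.3)

ineq_defect = -param_x - param_y + 1.0               # true defect
proxy_ineq_defect = -0.9 * param_x - param_y + 1.0   # relaxed surrogate

print(ineq_defect, proxy_ineq_defect)  # tensor(0.3000) tensor(0.3400)
```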