Prune metrics: regression 9/n #6637

Merged 6 commits on Mar 23, 2021
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -92,6 +92,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

[#6636](https://github.com/PyTorchLightning/pytorch-lightning/pull/6636),

[#6637](https://github.com/PyTorchLightning/pytorch-lightning/pull/6637),

)


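Both files in this PR swap a local implementation for a thin wrapper around torchmetrics, driven by the `deprecated` decorator from `pytorch_lightning.utilities.deprecation`. A minimal sketch of how such a forwarding decorator can work is shown below; it is an illustration only, not the actual Lightning helper, whose signature and warning text may differ.

```python
import functools
import warnings


def deprecated(target, ver_deprecate: str, ver_remove: str):
    """Illustrative forwarding decorator (hypothetical, not the Lightning implementation)."""

    def decorator(fn):

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Warn that the wrapped function is deprecated ...
            warnings.warn(
                f"`{fn.__name__}` was deprecated in v{ver_deprecate} and will be removed in v{ver_remove}."
                f" Use `{target.__module__}.{target.__name__}` instead.",
                DeprecationWarning,
            )
            # ... and forward all arguments to the torchmetrics implementation.
            return target(*args, **kwargs)

        return wrapper

    return decorator
```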
88 changes: 5 additions & 83 deletions pytorch_lightning/metrics/functional/psnr.py
@@ -14,46 +14,12 @@
from typing import Optional, Tuple, Union

import torch
from torchmetrics.utilities import reduce
from torchmetrics.functional import psnr as _psnr

from pytorch_lightning.utilities import rank_zero_warn


def _psnr_compute(
sum_squared_error: torch.Tensor,
n_obs: torch.Tensor,
data_range: torch.Tensor,
base: float = 10.0,
reduction: str = 'elementwise_mean',
) -> torch.Tensor:
psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs)
psnr = psnr_base_e * (10 / torch.log(torch.tensor(base)))
return reduce(psnr, reduction=reduction)


def _psnr_update(preds: torch.Tensor,
target: torch.Tensor,
dim: Optional[Union[int, Tuple[int, ...]]] = None) -> Tuple[torch.Tensor, torch.Tensor]:
if dim is None:
sum_squared_error = torch.sum(torch.pow(preds - target, 2))
n_obs = torch.tensor(target.numel(), device=target.device)
return sum_squared_error, n_obs

sum_squared_error = torch.sum(torch.pow(preds - target, 2), dim=dim)

if isinstance(dim, int):
dim_list = [dim]
else:
dim_list = list(dim)
if not dim_list:
n_obs = torch.tensor(target.numel(), device=target.device)
else:
n_obs = torch.tensor(target.size(), device=target.device)[dim_list].prod()
n_obs = n_obs.expand_as(sum_squared_error)

return sum_squared_error, n_obs
from pytorch_lightning.utilities.deprecation import deprecated


@deprecated(target=_psnr, ver_deprecate="1.3.0", ver_remove="1.5.0")
def psnr(
preds: torch.Tensor,
target: torch.Tensor,
@@ -63,50 +63,6 @@ def psnr(
dim: Optional[Union[int, Tuple[int, ...]]] = None,
) -> torch.Tensor:
"""
Computes the peak signal-to-noise ratio

Args:
preds: estimated signal
target: ground truth signal
data_range:
the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given
when ``dim`` is not None.
base: base of the logarithm to use (default: 10)
reduction: a method to reduce metric score over labels.

- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied

dim:
Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
None, meaning scores will be reduced across all dimensions.
Return:
Tensor with PSNR score

Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not provided.

Example:
>>> from pytorch_lightning.metrics.functional import psnr
>>> pred = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(pred, target)
tensor(2.5527)

.. deprecated::
Use :func:`torchmetrics.functional.psnr`. Will be removed in v1.5.0.
"""
if dim is None and reduction != 'elementwise_mean':
rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')

if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to calculate
# `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")

data_range = target.max() - target.min()
else:
data_range = torch.tensor(float(data_range))
sum_squared_error, n_obs = _psnr_update(preds, target, dim=dim)
return _psnr_compute(sum_squared_error, n_obs, data_range, base=base, reduction=reduction)
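A quick usage sketch of the migration path: after this change, `pytorch_lightning.metrics.functional.psnr` only forwards to torchmetrics, so calling torchmetrics directly is the forward-compatible path once the Lightning alias is removed in v1.5.0. The expected value is carried over from the pruned docstring example.

```python
import torch
from torchmetrics.functional import psnr  # the target the deprecated Lightning alias forwards to

pred = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])

# Same result as the removed docstring example: tensor(2.5527)
print(psnr(pred, target))
```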
124 changes: 6 additions & 118 deletions pytorch_lightning/metrics/functional/r2score.py
@@ -11,133 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple

import torch
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.functional import r2score as _r2score

from pytorch_lightning.utilities import rank_zero_warn


def _r2score_update(
preds: torch.Tensor,
target: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
_check_same_shape(preds, target)
if preds.ndim > 2:
raise ValueError(
'Expected both prediction and target to be 1D or 2D tensors,'
f' but received tensors with dimension {preds.shape}'
)
if len(preds) < 2:
raise ValueError('Needs at least two samples to calculate r2 score.')

sum_error = torch.sum(target, dim=0)
sum_squared_error = torch.sum(torch.pow(target, 2.0), dim=0)
residual = torch.sum(torch.pow(target - preds, 2.0), dim=0)
total = target.size(0)

return sum_squared_error, sum_error, residual, total


def _r2score_compute(
sum_squared_error: torch.Tensor,
sum_error: torch.Tensor,
residual: torch.Tensor,
total: torch.Tensor,
adjusted: int = 0,
multioutput: str = "uniform_average"
) -> torch.Tensor:
mean_error = sum_error / total
diff = sum_squared_error - sum_error * mean_error
raw_scores = 1 - (residual / diff)

if multioutput == "raw_values":
r2score = raw_scores
elif multioutput == "uniform_average":
r2score = torch.mean(raw_scores)
elif multioutput == "variance_weighted":
diff_sum = torch.sum(diff)
r2score = torch.sum(diff / diff_sum * raw_scores)
else:
raise ValueError(
'Argument `multioutput` must be either `raw_values`,'
f' `uniform_average` or `variance_weighted`. Received {multioutput}.'
)

if adjusted < 0 or not isinstance(adjusted, int):
raise ValueError('`adjusted` parameter should be an integer greater than or equal to 0.')

if adjusted != 0:
if adjusted > total - 1:
rank_zero_warn(
"More independent regressions than datapoints in"
" adjusted r2 score. Falls back to standard r2 score.", UserWarning
)
elif adjusted == total - 1:
rank_zero_warn("Division by zero in adjusted r2 score. Falls back to" " standard r2 score.", UserWarning)
else:
r2score = 1 - (1 - r2score) * (total - 1) / (total - adjusted - 1)
return r2score
from pytorch_lightning.utilities.deprecation import deprecated


@deprecated(target=_r2score, ver_deprecate="1.3.0", ver_remove="1.5.0")
def r2score(
preds: torch.Tensor,
target: torch.Tensor,
adjusted: int = 0,
multioutput: str = "uniform_average",
) -> torch.Tensor:
r"""
Computes r2 score also known as `coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_:

.. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}

where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
:math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is the total sum of squares. Can also calculate
the adjusted r2 score, given by

.. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}

where the parameter :math:`k` (the number of independent regressors) should
be provided as the ``adjusted`` argument.

Args:
preds: estimated labels
target: ground truth labels
adjusted: number of independent regressors for calculating adjusted r2 score.
Default 0 (standard r2 score).
multioutput: Defines aggregation in the case of multiple output scores. Can be one
of the following strings (default is ``'uniform_average'``):

* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances

Raises:
ValueError:
If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors.
ValueError:
If ``len(preds)`` is less than ``2``
since at least ``2`` samples are needed to calculate r2 score.
ValueError:
If ``multioutput`` is not one of ``raw_values``,
``uniform_average`` or ``variance_weighted``.
ValueError:
If ``adjusted`` is not an ``integer`` greater than or equal to ``0``.

Example:

>>> from pytorch_lightning.metrics.functional import r2score
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> r2score(preds, target)
tensor(0.9486)

>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> r2score(preds, target, multioutput='raw_values')
tensor([0.9654, 0.9082])
"""
sum_squared_error, sum_error, residual, total = _r2score_update(preds, target)
return _r2score_compute(sum_squared_error, sum_error, residual, total, adjusted, multioutput)
.. deprecated::
Use :func:`torchmetrics.functional.r2score`. Will be removed in v1.5.0.
"""