From 08b77b282e2801b4a2d8ad365f9977877d3d6ed9 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 22 Mar 2021 18:19:46 +0100 Subject: [PATCH 1/6] psnr --- pytorch_lightning/metrics/functional/psnr.py | 88 +----------- pytorch_lightning/metrics/regression/psnr.py | 123 ++--------------- tests/metrics/regression/test_psnr.py | 133 ------------------- tests/metrics/test_remove_1-5_metrics.py | 15 ++- 4 files changed, 26 insertions(+), 333 deletions(-) delete mode 100644 tests/metrics/regression/test_psnr.py diff --git a/pytorch_lightning/metrics/functional/psnr.py b/pytorch_lightning/metrics/functional/psnr.py index 0b50ea092b7fa..dd7aa44ae628e 100644 --- a/pytorch_lightning/metrics/functional/psnr.py +++ b/pytorch_lightning/metrics/functional/psnr.py @@ -14,46 +14,12 @@ from typing import Optional, Tuple, Union import torch -from torchmetrics.utilities import reduce +from torchmetrics.functional import psnr as _psnr -from pytorch_lightning.utilities import rank_zero_warn - - -def _psnr_compute( - sum_squared_error: torch.Tensor, - n_obs: torch.Tensor, - data_range: torch.Tensor, - base: float = 10.0, - reduction: str = 'elementwise_mean', -) -> torch.Tensor: - psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs) - psnr = psnr_base_e * (10 / torch.log(torch.tensor(base))) - return reduce(psnr, reduction=reduction) - - -def _psnr_update(preds: torch.Tensor, - target: torch.Tensor, - dim: Optional[Union[int, Tuple[int, ...]]] = None) -> Tuple[torch.Tensor, torch.Tensor]: - if dim is None: - sum_squared_error = torch.sum(torch.pow(preds - target, 2)) - n_obs = torch.tensor(target.numel(), device=target.device) - return sum_squared_error, n_obs - - sum_squared_error = torch.sum(torch.pow(preds - target, 2), dim=dim) - - if isinstance(dim, int): - dim_list = [dim] - else: - dim_list = list(dim) - if not dim_list: - n_obs = torch.tensor(target.numel(), device=target.device) - else: - n_obs = torch.tensor(target.size(), device=target.device)[dim_list].prod() - n_obs = n_obs.expand_as(sum_squared_error) - - return sum_squared_error, n_obs +from pytorch_lightning.utilities.deprecation import deprecated +@deprecated(target=_psnr, ver_deprecate="1.3.0", ver_remove="1.5.0") def psnr( preds: torch.Tensor, target: torch.Tensor, @@ -63,50 +29,6 @@ def psnr( dim: Optional[Union[int, Tuple[int, ...]]] = None, ) -> torch.Tensor: """ - Computes the peak signal-to-noise ratio - - Args: - preds: estimated signal - target: groun truth signal - data_range: - the range of the data. If None, it is determined from the data (max - min). ``data_range`` must be given - when ``dim`` is not None. - base: a base of a logarithm to use (default: 10) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - dim: - Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is - None meaning scores will be reduced across all dimensions. - Return: - Tensor with PSNR score - - Raises: - ValueError: - If ``dim`` is not ``None`` and ``data_range`` is not provided. - - Example: - >>> from pytorch_lightning.metrics.functional import psnr - >>> pred = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) - >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]]) - >>> psnr(pred, target) - tensor(2.5527) - + .. deprecated:: + Use :func:`torchmetrics.functional.psnr`. Will be removed in v1.5.0. 
""" - if dim is None and reduction != 'elementwise_mean': - rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.') - - if data_range is None: - if dim is not None: - # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to calculate - # `data_range` in the future. - raise ValueError("The `data_range` must be given when `dim` is not None.") - - data_range = target.max() - target.min() - else: - data_range = torch.tensor(float(data_range)) - sum_squared_error, n_obs = _psnr_update(preds, target, dim=dim) - return _psnr_compute(sum_squared_error, n_obs, data_range, base=base, reduction=reduction) diff --git a/pytorch_lightning/metrics/regression/psnr.py b/pytorch_lightning/metrics/regression/psnr.py index 746ff1e52d574..85b8eceaa24c5 100644 --- a/pytorch_lightning/metrics/regression/psnr.py +++ b/pytorch_lightning/metrics/regression/psnr.py @@ -11,61 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Optional, Sequence, Tuple, Union +from typing import Any, Optional, Tuple, Union -import torch -from torchmetrics import Metric +from torchmetrics import PSNR as _PSNR -from pytorch_lightning import utilities -from pytorch_lightning.metrics.functional.psnr import _psnr_compute, _psnr_update +from pytorch_lightning.utilities.deprecation import deprecated -class PSNR(Metric): - r""" - Computes `peak signal-to-noise ratio `_ (PSNR): - - .. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right) - - Where :math:`\text{MSE}` denotes the `mean-squared-error - `_ function. - - Args: - data_range: - the range of the data. If None, it is determined from the data (max - min). - The ``data_range`` must be given when ``dim`` is not None. - base: a base of a logarithm to use (default: 10) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - dim: - Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is - None meaning scores will be reduced across all dimensions and all batches. - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Raises: - ValueError: - If ``dim`` is not ``None`` and ``data_range`` is not given. 
- - Example: - - >>> from pytorch_lightning.metrics import PSNR - >>> psnr = PSNR() - >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) - >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]]) - >>> psnr(preds, target) - tensor(2.5527) - - """ +class PSNR(_PSNR): + @deprecated(target=_PSNR, ver_deprecate="1.3.0", ver_remove="1.5.0") def __init__( self, data_range: Optional[float] = None, @@ -76,71 +31,9 @@ def __init__( dist_sync_on_step: bool = False, process_group: Optional[Any] = None, ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - - if dim is None and reduction != 'elementwise_mean': - utilities.rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.') - - if dim is None: - self.add_state("sum_squared_error", default=torch.tensor(0.0), dist_reduce_fx="sum") - self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") - else: - self.add_state("sum_squared_error", default=[]) - self.add_state("total", default=[]) - - if data_range is None: - if dim is not None: - # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to - # calculate `data_range` in the future. - raise ValueError("The `data_range` must be given when `dim` is not None.") - - self.data_range = None - self.add_state("min_target", default=torch.tensor(0.0), dist_reduce_fx=torch.min) - self.add_state("max_target", default=torch.tensor(0.0), dist_reduce_fx=torch.max) - else: - self.register_buffer("data_range", torch.tensor(float(data_range))) - self.base = base - self.reduction = reduction - self.dim = tuple(dim) if isinstance(dim, Sequence) else dim - - def update(self, preds: torch.Tensor, target: torch.Tensor): """ - Update state with predictions and targets. + This implementation refers to :class:`~torchmetrics.PSNR`. - Args: - preds: Predictions from model - target: Ground truth values + .. deprecated:: + Use :class:`~torchmetrics.PSNR`. Will be removed in v1.5.0. """ - sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim) - if self.dim is None: - if self.data_range is None: - # keep track of min and max target values - self.min_target = min(target.min(), self.min_target) - self.max_target = max(target.max(), self.max_target) - - self.sum_squared_error += sum_squared_error - self.total += n_obs - else: - self.sum_squared_error.append(sum_squared_error) - self.total.append(n_obs) - - def compute(self): - """ - Compute peak signal-to-noise ratio over state. - """ - if self.data_range is not None: - data_range = self.data_range - else: - data_range = self.max_target - self.min_target - - if self.dim is None: - sum_squared_error = self.sum_squared_error - total = self.total - else: - sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error]) - total = torch.cat([values.flatten() for values in self.total]) - return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction) diff --git a/tests/metrics/regression/test_psnr.py b/tests/metrics/regression/test_psnr.py deleted file mode 100644 index eb07fffb9d55c..0000000000000 --- a/tests/metrics/regression/test_psnr.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import namedtuple -from functools import partial - -import numpy as np -import pytest -import torch -from skimage.metrics import peak_signal_noise_ratio - -from pytorch_lightning.metrics.functional import psnr -from pytorch_lightning.metrics.regression import PSNR -from tests.metrics.utils import BATCH_SIZE, MetricTester, NUM_BATCHES - -torch.manual_seed(42) - -Input = namedtuple('Input', ["preds", "target"]) - -_input_size = (NUM_BATCHES, BATCH_SIZE, 32, 32) -_inputs = [ - Input( - preds=torch.randint(n_cls_pred, _input_size, dtype=torch.float), - target=torch.randint(n_cls_target, _input_size, dtype=torch.float), - ) for n_cls_pred, n_cls_target in [(10, 10), (5, 10), (10, 5)] -] - - -def _to_sk_peak_signal_noise_ratio_inputs(value, dim): - value = value.numpy() - batches = value[None] if value.ndim == len(_input_size) - 1 else value - - if dim is None: - return [batches] - - num_dims = np.size(dim) - if not num_dims: - return batches - - inputs = [] - for batch in batches: - batch = np.moveaxis(batch, dim, np.arange(-num_dims, 0)) - psnr_input_shape = batch.shape[-num_dims:] - inputs.extend(batch.reshape(-1, *psnr_input_shape)) - return inputs - - -def _sk_psnr(preds, target, data_range, reduction, dim): - sk_preds_lists = _to_sk_peak_signal_noise_ratio_inputs(preds, dim=dim) - sk_target_lists = _to_sk_peak_signal_noise_ratio_inputs(target, dim=dim) - np_reduce_map = {"elementwise_mean": np.mean, "none": np.array, "sum": np.sum} - return np_reduce_map[reduction]([ - peak_signal_noise_ratio(sk_target, sk_preds, data_range=data_range) - for sk_target, sk_preds in zip(sk_target_lists, sk_preds_lists) - ]) - - -def _base_e_sk_psnr(preds, target, data_range, reduction, dim): - return _sk_psnr(preds, target, data_range, reduction, dim) * np.log(10) - - -@pytest.mark.parametrize( - "preds, target, data_range, reduction, dim", - [ - (_inputs[0].preds, _inputs[0].target, 10, "elementwise_mean", None), - (_inputs[1].preds, _inputs[1].target, 10, "elementwise_mean", None), - (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", None), - (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", 1), - (_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", (1, 2)), - (_inputs[2].preds, _inputs[2].target, 5, "sum", (1, 2)), - ], -) -@pytest.mark.parametrize( - "base, sk_metric", - [ - (10.0, _sk_psnr), - (2.718281828459045, _base_e_sk_psnr), - ], -) -class TestPSNR(MetricTester): - - @pytest.mark.parametrize("ddp", [True, False]) - @pytest.mark.parametrize("dist_sync_on_step", [True, False]) - def test_psnr(self, preds, target, data_range, base, reduction, dim, sk_metric, ddp, dist_sync_on_step): - _args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim} - self.run_class_metric_test( - ddp, - preds, - target, - PSNR, - partial(sk_metric, data_range=data_range, reduction=reduction, dim=dim), - metric_args=_args, - dist_sync_on_step=dist_sync_on_step, - ) - - def test_psnr_functional(self, preds, target, sk_metric, data_range, base, reduction, dim): - _args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": 
dim} - self.run_functional_metric_test( - preds, - target, - psnr, - partial(sk_metric, data_range=data_range, reduction=reduction, dim=dim), - metric_args=_args, - ) - - -@pytest.mark.parametrize("reduction", ["none", "sum"]) -def test_reduction_for_dim_none(reduction): - match = f"The `reduction={reduction}` will not have any effect when `dim` is None." - with pytest.warns(UserWarning, match=match): - PSNR(reduction=reduction, dim=None) - - with pytest.warns(UserWarning, match=match): - psnr(_inputs[0].preds, _inputs[0].target, reduction=reduction, dim=None) - - -def test_missing_data_range(): - with pytest.raises(ValueError): - PSNR(data_range=None, dim=0) - - with pytest.raises(ValueError): - psnr(_inputs[0].preds, _inputs[0].target, data_range=None, dim=0) diff --git a/tests/metrics/test_remove_1-5_metrics.py b/tests/metrics/test_remove_1-5_metrics.py index eaf17ec0792da..14a2d086cfc7c 100644 --- a/tests/metrics/test_remove_1-5_metrics.py +++ b/tests/metrics/test_remove_1-5_metrics.py @@ -35,7 +35,7 @@ PrecisionRecallCurve, Recall, ROC, - StatScores, + StatScores, PSNR, ) from pytorch_lightning.metrics.functional import ( auc, @@ -55,7 +55,7 @@ precision_recall_curve, recall, roc, - stat_scores, + stat_scores, psnr, ) from pytorch_lightning.metrics.functional.accuracy import accuracy from pytorch_lightning.metrics.functional.mean_relative_error import mean_relative_error @@ -290,3 +290,14 @@ def test_v1_5_metric_regress(): with pytest.deprecated_call(match='It will be removed in v1.5.0'): res = mean_squared_log_error(x, y) assert torch.allclose(res, torch.tensor(0.0207), atol=1e-4) + + PSNR.__init__.warned = False + with pytest.deprecated_call(match='It will be removed in v1.5.0'): + PSNR(num_classes=1) + + preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) + target = torch.tensor([[3.0, 2.0], [1.0, 0.0]]) + psnr.warned = False + with pytest.deprecated_call(match='It will be removed in v1.5.0'): + res = psnr(preds, target) + assert torch.allclose(res, torch.tensor(2.5527), atol=1e-4) From 38fed83ea116da01775b3a944c0e7a9dee742132 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 22 Mar 2021 18:24:41 +0100 Subject: [PATCH 2/6] r2score --- .../metrics/functional/r2score.py | 124 +----------------- .../metrics/regression/r2score.py | 122 +---------------- tests/metrics/regression/test_r2score.py | 114 ---------------- tests/metrics/test_remove_1-5_metrics.py | 21 ++- 4 files changed, 31 insertions(+), 350 deletions(-) delete mode 100644 tests/metrics/regression/test_r2score.py diff --git a/pytorch_lightning/metrics/functional/r2score.py b/pytorch_lightning/metrics/functional/r2score.py index d3f1090564a88..49273d9cefaed 100644 --- a/pytorch_lightning/metrics/functional/r2score.py +++ b/pytorch_lightning/metrics/functional/r2score.py @@ -11,133 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Tuple import torch -from torchmetrics.utilities.checks import _check_same_shape +from torchmetrics.functional import r2score as _r2score -from pytorch_lightning.utilities import rank_zero_warn - - -def _r2score_update( - preds: torch.tensor, - target: torch.Tensor, -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - _check_same_shape(preds, target) - if preds.ndim > 2: - raise ValueError( - 'Expected both prediction and target to be 1D or 2D tensors,' - f' but recevied tensors with dimension {preds.shape}' - ) - if len(preds) < 2: - raise ValueError('Needs atleast two samples to calculate r2 score.') - - sum_error = torch.sum(target, dim=0) - sum_squared_error = torch.sum(torch.pow(target, 2.0), dim=0) - residual = torch.sum(torch.pow(target - preds, 2.0), dim=0) - total = target.size(0) - - return sum_squared_error, sum_error, residual, total - - -def _r2score_compute( - sum_squared_error: torch.Tensor, - sum_error: torch.Tensor, - residual: torch.Tensor, - total: torch.Tensor, - adjusted: int = 0, - multioutput: str = "uniform_average" -) -> torch.Tensor: - mean_error = sum_error / total - diff = sum_squared_error - sum_error * mean_error - raw_scores = 1 - (residual / diff) - - if multioutput == "raw_values": - r2score = raw_scores - elif multioutput == "uniform_average": - r2score = torch.mean(raw_scores) - elif multioutput == "variance_weighted": - diff_sum = torch.sum(diff) - r2score = torch.sum(diff / diff_sum * raw_scores) - else: - raise ValueError( - 'Argument `multioutput` must be either `raw_values`,' - f' `uniform_average` or `variance_weighted`. Received {multioutput}.' - ) - - if adjusted < 0 or not isinstance(adjusted, int): - raise ValueError('`adjusted` parameter should be an integer larger or' ' equal to 0.') - - if adjusted != 0: - if adjusted > total - 1: - rank_zero_warn( - "More independent regressions than datapoints in" - " adjusted r2 score. Falls back to standard r2 score.", UserWarning - ) - elif adjusted == total - 1: - rank_zero_warn("Division by zero in adjusted r2 score. Falls back to" " standard r2 score.", UserWarning) - else: - r2score = 1 - (1 - r2score) * (total - 1) / (total - adjusted - 1) - return r2score +from pytorch_lightning.utilities.deprecation import deprecated +@deprecated(target=_r2score, ver_deprecate="1.3.0", ver_remove="1.5.0") def r2score( preds: torch.Tensor, target: torch.Tensor, adjusted: int = 0, multioutput: str = "uniform_average", ) -> torch.Tensor: - r""" - Computes r2 score also known as `coefficient of determination - `_: - - .. math:: R^2 = 1 - \frac{SS_res}{SS_tot} - - where :math:`SS_res=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and - :math:`SS_tot=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate - adjusted r2 score given by - - .. math:: R^2_adj = 1 - \frac{(1-R^2)(n-1)}{n-k-1} - - where the parameter :math:`k` (the number of independent regressors) should - be provided as the ``adjusted`` argument. - - Args: - preds: estimated labels - target: ground truth labels - adjusted: number of independent regressors for calculating adjusted r2 score. - Default 0 (standard r2 score). - multioutput: Defines aggregation in the case of multiple output scores. 
Can be one - of the following strings (default is ``'uniform_average'``.): - - * ``'raw_values'`` returns full set of scores - * ``'uniform_average'`` scores are uniformly averaged - * ``'variance_weighted'`` scores are weighted by their individual variances - - Raises: - ValueError: - If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors. - ValueError: - If ``len(preds)`` is less than ``2`` - since at least ``2`` sampels are needed to calculate r2 score. - ValueError: - If ``multioutput`` is not one of ``raw_values``, - ``uniform_average`` or ``variance_weighted``. - ValueError: - If ``adjusted`` is not an ``integer`` greater than ``0``. - - Example: - - >>> from pytorch_lightning.metrics.functional import r2score - >>> target = torch.tensor([3, -0.5, 2, 7]) - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) - >>> r2score(preds, target) - tensor(0.9486) - - >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> r2score(preds, target, multioutput='raw_values') - tensor([0.9654, 0.9082]) """ - sum_squared_error, sum_error, residual, total = _r2score_update(preds, target) - return _r2score_compute(sum_squared_error, sum_error, residual, total, adjusted, multioutput) + .. deprecated:: + Use :func:`torchmetrics.functional.r2score`. Will be removed in v1.5.0. + """ diff --git a/pytorch_lightning/metrics/regression/r2score.py b/pytorch_lightning/metrics/regression/r2score.py index 8156b8bc72d48..780cb3bde9f85 100644 --- a/pytorch_lightning/metrics/regression/r2score.py +++ b/pytorch_lightning/metrics/regression/r2score.py @@ -13,81 +13,14 @@ # limitations under the License. from typing import Any, Callable, Optional -import torch -from torchmetrics import Metric +from torchmetrics import R2Score as _R2Score -from pytorch_lightning.metrics.functional.r2score import _r2score_compute, _r2score_update +from pytorch_lightning.utilities.deprecation import deprecated -class R2Score(Metric): - r""" - Computes r2 score also known as `coefficient of determination - `_: - - .. math:: R^2 = 1 - \frac{SS_res}{SS_tot} - - where :math:`SS_res=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and - :math:`SS_tot=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate - adjusted r2 score given by - - .. math:: R^2_adj = 1 - \frac{(1-R^2)(n-1)}{n-k-1} - - where the parameter :math:`k` (the number of independent regressors) should - be provided as the `adjusted` argument. - - Forward accepts - - - ``preds`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) - - ``target`` (float tensor): ``(N,)`` or ``(N, M)`` (multioutput) - - In the case of multioutput, as default the variances will be uniformly - averaged over the additional dimensions. Please see argument `multioutput` - for changing this behavior. - - Args: - num_outputs: - Number of outputs in multioutput setting (default is 1) - adjusted: - number of independent regressors for calculating adjusted r2 score. - Default 0 (standard r2 score). - multioutput: - Defines aggregation in the case of multiple output scores. Can be one - of the following strings (default is ``'uniform_average'``.): - - * ``'raw_values'`` returns full set of scores - * ``'uniform_average'`` scores are uniformly averaged - * ``'variance_weighted'`` scores are weighted by their individual variances - - compute_on_step: - Forward only calls ``update()`` and return None if this is set to False. 
default: True - dist_sync_on_step: - Synchronize metric state across processes at each ``forward()`` - before returning the value at the step. default: False - process_group: - Specify the process group on which synchronization is called. default: None (which selects the entire world) - - Raises: - ValueError: - If ``adjusted`` parameter is not an integer larger or equal to 0. - ValueError: - If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``. - - Example: - - >>> from pytorch_lightning.metrics import R2Score - >>> target = torch.tensor([3, -0.5, 2, 7]) - >>> preds = torch.tensor([2.5, 0.0, 2, 8]) - >>> r2score = R2Score() - >>> r2score(preds, target) - tensor(0.9486) - - >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) - >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) - >>> r2score = R2Score(num_outputs=2, multioutput='raw_values') - >>> r2score(preds, target) - tensor([0.9654, 0.9082]) - """ +class R2Score(_R2Score): + @deprecated(target=_R2Score, ver_deprecate="1.3.0", ver_remove="1.5.0") def __init__( self, num_outputs: int = 1, @@ -98,50 +31,9 @@ def __init__( process_group: Optional[Any] = None, dist_sync_fn: Callable = None, ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - dist_sync_fn=dist_sync_fn, - ) - - self.num_outputs = num_outputs - - if adjusted < 0 or not isinstance(adjusted, int): - raise ValueError('`adjusted` parameter should be an integer larger or equal to 0.') - self.adjusted = adjusted - - allowed_multioutput = ('raw_values', 'uniform_average', 'variance_weighted') - if multioutput not in allowed_multioutput: - raise ValueError( - f'Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}' - ) - self.multioutput = multioutput - - self.add_state("sum_squared_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("sum_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("residual", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum") - self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") - - def update(self, preds: torch.Tensor, target: torch.Tensor): - """ - Update state with predictions and targets. - - Args: - preds: Predictions from model - target: Ground truth values """ - sum_squared_error, sum_error, residual, total = _r2score_update(preds, target) + This implementation refers to :class:`~torchmetrics.Accuracy`. - self.sum_squared_error += sum_squared_error - self.sum_error += sum_error - self.residual += residual - self.total += total - - def compute(self) -> torch.Tensor: - """ - Computes r2 score over the metric states. + .. deprecated:: + Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0. 
""" - return _r2score_compute( - self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput - ) diff --git a/tests/metrics/regression/test_r2score.py b/tests/metrics/regression/test_r2score.py deleted file mode 100644 index 232b003e6116a..0000000000000 --- a/tests/metrics/regression/test_r2score.py +++ /dev/null @@ -1,114 +0,0 @@ -from collections import namedtuple -from functools import partial - -import pytest -import torch -from sklearn.metrics import r2_score as sk_r2score - -from pytorch_lightning.metrics.functional import r2score -from pytorch_lightning.metrics.regression import R2Score -from tests.metrics.utils import BATCH_SIZE, MetricTester, NUM_BATCHES - -torch.manual_seed(42) - -num_targets = 5 - -Input = namedtuple('Input', ["preds", "target"]) - -_single_target_inputs = Input( - preds=torch.rand(NUM_BATCHES, BATCH_SIZE), - target=torch.rand(NUM_BATCHES, BATCH_SIZE), -) - -_multi_target_inputs = Input( - preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets), - target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets), -) - - -def _single_target_sk_metric(preds, target, adjusted, multioutput): - sk_preds = preds.view(-1).numpy() - sk_target = target.view(-1).numpy() - r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput) - if adjusted != 0: - r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1) - return r2_score - - -def _multi_target_sk_metric(preds, target, adjusted, multioutput): - sk_preds = preds.view(-1, num_targets).numpy() - sk_target = target.view(-1, num_targets).numpy() - r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput) - if adjusted != 0: - r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1) - return r2_score - - -@pytest.mark.parametrize("adjusted", [0, 5, 10]) -@pytest.mark.parametrize("multioutput", ['raw_values', 'uniform_average', 'variance_weighted']) -@pytest.mark.parametrize( - "preds, target, sk_metric, num_outputs", - [ - (_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric, 1), - (_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric, num_targets), - ], -) -class TestR2Score(MetricTester): - - @pytest.mark.parametrize("ddp", [True, False]) - @pytest.mark.parametrize("dist_sync_on_step", [True, False]) - def test_r2(self, adjusted, multioutput, preds, target, sk_metric, num_outputs, ddp, dist_sync_on_step): - self.run_class_metric_test( - ddp, - preds, - target, - R2Score, - partial(sk_metric, adjusted=adjusted, multioutput=multioutput), - dist_sync_on_step, - metric_args=dict(adjusted=adjusted, multioutput=multioutput, num_outputs=num_outputs), - ) - - def test_r2_functional(self, adjusted, multioutput, preds, target, sk_metric, num_outputs): - self.run_functional_metric_test( - preds, - target, - r2score, - partial(sk_metric, adjusted=adjusted, multioutput=multioutput), - metric_args=dict(adjusted=adjusted, multioutput=multioutput), - ) - - -def test_error_on_different_shape(metric_class=R2Score): - metric = metric_class() - with pytest.raises(RuntimeError, match='Predictions and targets are expected to have the same shape'): - metric(torch.randn(100, ), torch.randn(50, )) - - -def test_error_on_multidim_tensors(metric_class=R2Score): - metric = metric_class() - with pytest.raises( - ValueError, - match=r'Expected both prediction and target to be 1D or 2D tensors,' - r' but recevied tensors with dimension .' 
- ): - metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5)) - - -def test_error_on_too_few_samples(metric_class=R2Score): - metric = metric_class() - with pytest.raises(ValueError, match='Needs atleast two samples to calculate r2 score.'): - metric(torch.randn(1, ), torch.randn(1, )) - - -def test_warning_on_too_large_adjusted(metric_class=R2Score): - metric = metric_class(adjusted=10) - - with pytest.warns( - UserWarning, - match="More independent regressions than datapoints in" - " adjusted r2 score. Falls back to standard r2 score." - ): - metric(torch.randn(10, ), torch.randn(10, )) - - with pytest.warns(UserWarning, match="Division by zero in adjusted r2 score. Falls back to" " standard r2 score."): - metric(torch.randn(11, ), torch.randn(11, )) diff --git a/tests/metrics/test_remove_1-5_metrics.py b/tests/metrics/test_remove_1-5_metrics.py index 14a2d086cfc7c..28a33ea332ef6 100644 --- a/tests/metrics/test_remove_1-5_metrics.py +++ b/tests/metrics/test_remove_1-5_metrics.py @@ -33,9 +33,11 @@ MetricCollection, Precision, PrecisionRecallCurve, + PSNR, + R2Score, Recall, ROC, - StatScores, PSNR, + StatScores, ) from pytorch_lightning.metrics.functional import ( auc, @@ -53,9 +55,11 @@ precision, precision_recall, precision_recall_curve, + psnr, + r2score, recall, roc, - stat_scores, psnr, + stat_scores, ) from pytorch_lightning.metrics.functional.accuracy import accuracy from pytorch_lightning.metrics.functional.mean_relative_error import mean_relative_error @@ -293,7 +297,11 @@ def test_v1_5_metric_regress(): PSNR.__init__.warned = False with pytest.deprecated_call(match='It will be removed in v1.5.0'): - PSNR(num_classes=1) + PSNR() + + R2Score.__init__.warned = False + with pytest.deprecated_call(match='It will be removed in v1.5.0'): + R2Score() preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) target = torch.tensor([[3.0, 2.0], [1.0, 0.0]]) @@ -301,3 +309,10 @@ def test_v1_5_metric_regress(): with pytest.deprecated_call(match='It will be removed in v1.5.0'): res = psnr(preds, target) assert torch.allclose(res, torch.tensor(2.5527), atol=1e-4) + + target = torch.tensor([3, -0.5, 2, 7]) + preds = torch.tensor([2.5, 0.0, 2, 8]) + r2score.warned = False + with pytest.deprecated_call(match='It will be removed in v1.5.0'): + res = r2score(preds, target) + assert torch.allclose(res, torch.tensor(0.9486), atol=1e-4) \ No newline at end of file From 556d803fc2d65e668c8bd34cc5d17dd34eddf295 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 22 Mar 2021 18:28:16 +0100 Subject: [PATCH 3/6] ssim --- pytorch_lightning/metrics/functional/ssim.py | 142 +------------------ pytorch_lightning/metrics/regression/ssim.py | 78 +--------- tests/metrics/regression/test_ssim.py | 104 -------------- tests/metrics/test_remove_1-5_metrics.py | 17 ++- 4 files changed, 27 insertions(+), 314 deletions(-) delete mode 100644 tests/metrics/regression/test_ssim.py diff --git a/pytorch_lightning/metrics/functional/ssim.py b/pytorch_lightning/metrics/functional/ssim.py index 4899a3ad3be4d..8809fec8d8ff1 100644 --- a/pytorch_lightning/metrics/functional/ssim.py +++ b/pytorch_lightning/metrics/functional/ssim.py @@ -11,107 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional, Sequence, Tuple +from typing import Optional, Sequence import torch -from torch.nn import functional as F -from torchmetrics.utilities import reduce -from torchmetrics.utilities.checks import _check_same_shape +from torchmetrics.functional import ssim as _ssim - -def _gaussian(kernel_size: int, sigma: int, dtype: torch.dtype, device: torch.device): - dist = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=dtype, device=device) - gauss = torch.exp(-torch.pow(dist / sigma, 2) / 2) - return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size) - - -def _gaussian_kernel( - channel: int, kernel_size: Sequence[int], sigma: Sequence[float], dtype: torch.dtype, device: torch.device -): - gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device) - gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device) - kernel = torch.matmul(gaussian_kernel_x.t(), gaussian_kernel_y) # (kernel_size, 1) * (1, kernel_size) - - return kernel.expand(channel, 1, kernel_size[0], kernel_size[1]) - - -def _ssim_update( - preds: torch.Tensor, - target: torch.Tensor, -) -> Tuple[torch.Tensor, torch.Tensor]: - if preds.dtype != target.dtype: - raise TypeError( - "Expected `preds` and `target` to have the same data type." - f" Got pred: {preds.dtype} and target: {target.dtype}." - ) - _check_same_shape(preds, target) - if len(preds.shape) != 4: - raise ValueError( - "Expected `preds` and `target` to have BxCxHxW shape." - f" Got pred: {preds.shape} and target: {target.shape}." - ) - return preds, target - - -def _ssim_compute( - preds: torch.Tensor, - target: torch.Tensor, - kernel_size: Sequence[int] = (11, 11), - sigma: Sequence[float] = (1.5, 1.5), - reduction: str = "elementwise_mean", - data_range: Optional[float] = None, - k1: float = 0.01, - k2: float = 0.03, -): - if len(kernel_size) != 2 or len(sigma) != 2: - raise ValueError( - "Expected `kernel_size` and `sigma` to have the length of two." - f" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}." - ) - - if any(x % 2 == 0 or x <= 0 for x in kernel_size): - raise ValueError(f"Expected `kernel_size` to have odd positive number. Got {kernel_size}.") - - if any(y <= 0 for y in sigma): - raise ValueError(f"Expected `sigma` to have positive number. 
Got {sigma}.") - - if data_range is None: - data_range = max(preds.max() - preds.min(), target.max() - target.min()) - - c1 = pow(k1 * data_range, 2) - c2 = pow(k2 * data_range, 2) - device = preds.device - - channel = preds.size(1) - dtype = preds.dtype - kernel = _gaussian_kernel(channel, kernel_size, sigma, dtype, device) - pad_w = (kernel_size[0] - 1) // 2 - pad_h = (kernel_size[1] - 1) // 2 - - preds = F.pad(preds, (pad_w, pad_w, pad_h, pad_h), mode='reflect') - target = F.pad(target, (pad_w, pad_w, pad_h, pad_h), mode='reflect') - - input_list = torch.cat((preds, target, preds * preds, target * target, preds * target)) # (5 * B, C, H, W) - outputs = F.conv2d(input_list, kernel, groups=channel) - output_list = [outputs[x * preds.size(0):(x + 1) * preds.size(0)] for x in range(len(outputs))] - - mu_pred_sq = output_list[0].pow(2) - mu_target_sq = output_list[1].pow(2) - mu_pred_target = output_list[0] * output_list[1] - - sigma_pred_sq = output_list[2] - mu_pred_sq - sigma_target_sq = output_list[3] - mu_target_sq - sigma_pred_target = output_list[4] - mu_pred_target - - upper = 2 * sigma_pred_target + c2 - lower = sigma_pred_sq + sigma_target_sq + c2 - - ssim_idx = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower) - ssim_idx = ssim_idx[..., pad_h:-pad_h, pad_w:-pad_w] - - return reduce(ssim_idx, reduction) +from pytorch_lightning.utilities.deprecation import deprecated +@deprecated(target=_ssim, ver_deprecate="1.3.0", ver_remove="1.5.0") def ssim( preds: torch.Tensor, target: torch.Tensor, @@ -123,44 +31,6 @@ def ssim( k2: float = 0.03, ) -> torch.Tensor: """ - Computes Structual Similarity Index Measure - - Args: - preds: estimated image - target: ground truth image - kernel_size: size of the gaussian kernel (default: (11, 11)) - sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - data_range: Range of the image. If ``None``, it is determined from the image (max - min) - k1: Parameter of SSIM. Default: 0.01 - k2: Parameter of SSIM. Default: 0.03 - - Return: - Tensor with SSIM score - - Raises: - TypeError: - If ``preds`` and ``target`` don't have the same data type. - ValueError: - If ``preds`` and ``target`` don't have ``BxCxHxW shape``. - ValueError: - If the length of ``kernel_size`` or ``sigma`` is not ``2``. - ValueError: - If one of the elements of ``kernel_size`` is not an ``odd positive number``. - ValueError: - If one of the elements of ``sigma`` is not a ``positive number``. - - Example: - >>> from pytorch_lightning.metrics.functional import ssim - >>> preds = torch.rand([16, 1, 16, 16]) - >>> target = preds * 0.75 - >>> ssim(preds, target) - tensor(0.9219) + .. deprecated:: + Use :func:`torchmetrics.functional.ssim`. Will be removed in v1.5.0. """ - preds, target = _ssim_update(preds, target) - return _ssim_compute(preds, target, kernel_size, sigma, reduction, data_range, k1, k2) diff --git a/pytorch_lightning/metrics/regression/ssim.py b/pytorch_lightning/metrics/regression/ssim.py index a3bbab938ffad..930f803ab8679 100644 --- a/pytorch_lightning/metrics/regression/ssim.py +++ b/pytorch_lightning/metrics/regression/ssim.py @@ -13,43 +13,14 @@ # limitations under the License. 
from typing import Any, Optional, Sequence -import torch -from torchmetrics import Metric +from torchmetrics import SSIM as _SSIM -from pytorch_lightning.metrics.functional.ssim import _ssim_compute, _ssim_update -from pytorch_lightning.utilities import rank_zero_warn +from pytorch_lightning.utilities.deprecation import deprecated -class SSIM(Metric): - """ - Computes `Structual Similarity Index Measure - `_ (SSIM). - - Args: - kernel_size: size of the gaussian kernel (default: (11, 11)) - sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5)) - reduction: a method to reduce metric score over labels. - - - ``'elementwise_mean'``: takes the mean (default) - - ``'sum'``: takes the sum - - ``'none'``: no reduction will be applied - - data_range: Range of the image. If ``None``, it is determined from the image (max - min) - k1: Parameter of SSIM. Default: 0.01 - k2: Parameter of SSIM. Default: 0.03 - - Return: - Tensor with SSIM score - - Example: - >>> from pytorch_lightning.metrics import SSIM - >>> preds = torch.rand([16, 1, 16, 16]) - >>> target = preds * 0.75 - >>> ssim = SSIM() - >>> ssim(preds, target) - tensor(0.9219) - """ +class SSIM(_SSIM): + @deprecated(target=_SSIM, ver_deprecate="1.3.0", ver_remove="1.5.0") def __init__( self, kernel_size: Sequence[int] = (11, 11), @@ -62,44 +33,9 @@ def __init__( dist_sync_on_step: bool = False, process_group: Optional[Any] = None, ): - super().__init__( - compute_on_step=compute_on_step, - dist_sync_on_step=dist_sync_on_step, - process_group=process_group, - ) - rank_zero_warn( - 'Metric `SSIM` will save all targets and' - ' predictions in buffer. For large datasets this may lead' - ' to large memory footprint.' - ) - - self.add_state("y", default=[], dist_reduce_fx=None) - self.add_state("y_pred", default=[], dist_reduce_fx=None) - self.kernel_size = kernel_size - self.sigma = sigma - self.data_range = data_range - self.k1 = k1 - self.k2 = k2 - self.reduction = reduction - - def update(self, preds: torch.Tensor, target: torch.Tensor): """ - Update state with predictions and targets. + This implementation refers to :class:`~torchmetrics.Accuracy`. - Args: - preds: Predictions from model - target: Ground truth values - """ - preds, target = _ssim_update(preds, target) - self.y_pred.append(preds) - self.y.append(target) - - def compute(self): - """ - Computes explained variance over state. + .. deprecated:: + Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0. 
""" - preds = torch.cat(self.y_pred, dim=0) - target = torch.cat(self.y, dim=0) - return _ssim_compute( - preds, target, self.kernel_size, self.sigma, self.reduction, self.data_range, self.k1, self.k2 - ) diff --git a/tests/metrics/regression/test_ssim.py b/tests/metrics/regression/test_ssim.py deleted file mode 100644 index f7e4b7a58e001..0000000000000 --- a/tests/metrics/regression/test_ssim.py +++ /dev/null @@ -1,104 +0,0 @@ -from collections import namedtuple -from functools import partial - -import pytest -import torch -from skimage.metrics import structural_similarity - -from pytorch_lightning.metrics.functional import ssim -from pytorch_lightning.metrics.regression import SSIM -from tests.metrics.utils import BATCH_SIZE, MetricTester, NUM_BATCHES - -torch.manual_seed(42) - -Input = namedtuple('Input', ["preds", "target", "multichannel"]) - -_inputs = [] -for size, channel, coef, multichannel, dtype in [ - (12, 3, 0.9, True, torch.float), - (13, 1, 0.8, False, torch.float32), - (14, 1, 0.7, False, torch.double), - (15, 3, 0.6, True, torch.float64), -]: - preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype) - _inputs.append(Input( - preds=preds, - target=preds * coef, - multichannel=multichannel, - )) - - -def _sk_metric(preds, target, data_range, multichannel): - c, h, w = preds.shape[-3:] - sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy() - sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy() - if not multichannel: - sk_preds = sk_preds[:, :, :, 0] - sk_target = sk_target[:, :, :, 0] - - return structural_similarity( - sk_target, - sk_preds, - data_range=data_range, - multichannel=multichannel, - gaussian_weights=True, - win_size=11, - sigma=1.5, - use_sample_covariance=False - ) - - -@pytest.mark.parametrize( - "preds, target, multichannel", - [(i.preds, i.target, i.multichannel) for i in _inputs], -) -class TestSSIM(MetricTester): - atol = 6e-5 - - @pytest.mark.parametrize("ddp", [True, False]) - @pytest.mark.parametrize("dist_sync_on_step", [True, False]) - def test_ssim(self, preds, target, multichannel, ddp, dist_sync_on_step): - self.run_class_metric_test( - ddp, - preds, - target, - SSIM, - partial(_sk_metric, data_range=1.0, multichannel=multichannel), - metric_args={"data_range": 1.0}, - dist_sync_on_step=dist_sync_on_step, - ) - - def test_ssim_functional(self, preds, target, multichannel): - self.run_functional_metric_test( - preds, - target, - ssim, - partial(_sk_metric, data_range=1.0, multichannel=multichannel), - metric_args={"data_range": 1.0}, - ) - - -@pytest.mark.parametrize( - ['pred', 'target', 'kernel', 'sigma'], - [ - pytest.param([1, 16, 16], [1, 16, 16], [11, 11], [1.5, 1.5]), # len(shape) - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5]), # len(kernel), len(sigma) - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5, 1.5]), # len(kernel), len(sigma) - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5]), # len(kernel), len(sigma) - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5]), # invalid kernel input - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5]), # invalid kernel input - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, -11], [1.5, 1.5]), # invalid kernel input - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0]), # invalid sigma input - pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, -1.5]), # invalid sigma input - ], -) -def test_ssim_invalid_inputs(pred, target, kernel, sigma): - pred_t = torch.rand(pred) - 
target_t = torch.rand(target, dtype=torch.float64)
-    with pytest.raises(TypeError):
-        ssim(pred_t, target_t)
-
-    pred = torch.rand(pred)
-    target = torch.rand(target)
-    with pytest.raises(ValueError):
-        ssim(pred, target, kernel, sigma)
diff --git a/tests/metrics/test_remove_1-5_metrics.py b/tests/metrics/test_remove_1-5_metrics.py
index 28a33ea332ef6..7f90cffea047d 100644
--- a/tests/metrics/test_remove_1-5_metrics.py
+++ b/tests/metrics/test_remove_1-5_metrics.py
@@ -37,7 +37,7 @@
     R2Score,
     Recall,
     ROC,
-    StatScores,
+    StatScores, SSIM,
 )
 from pytorch_lightning.metrics.functional import (
     auc,
@@ -59,7 +59,7 @@
     r2score,
     recall,
     roc,
-    stat_scores,
+    stat_scores, ssim,
 )
 from pytorch_lightning.metrics.functional.accuracy import accuracy
 from pytorch_lightning.metrics.functional.mean_relative_error import mean_relative_error
@@ -303,6 +303,10 @@ def test_v1_5_metric_regress():
     with pytest.deprecated_call(match='It will be removed in v1.5.0'):
         R2Score()
 
+    SSIM.__init__.warned = False
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        SSIM()
+
     preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
     target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
     psnr.warned = False
@@ -315,4 +319,11 @@ def test_v1_5_metric_regress():
     r2score.warned = False
     with pytest.deprecated_call(match='It will be removed in v1.5.0'):
         res = r2score(preds, target)
-    assert torch.allclose(res, torch.tensor(0.9486), atol=1e-4)
\ No newline at end of file
+    assert torch.allclose(res, torch.tensor(0.9486), atol=1e-4)
+
+    preds = torch.rand([16, 1, 16, 16])
+    target = preds * 0.75
+    ssim.warned = False
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        res = ssim(preds, target)
+    assert torch.allclose(res, torch.tensor(0.9219), atol=1e-4)
\ No newline at end of file

From 3dfca7680423b553961df61fb5147b472e609ecb Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 22 Mar 2021 18:30:38 +0100
Subject: [PATCH 4/6] chlog

---
 CHANGELOG.md                             | 2 ++
 tests/metrics/test_remove_1-5_metrics.py | 6 ++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57a071bff297a..4cf3e0f1fd326 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -92,6 +92,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
     [#6636](https://github.com/PyTorchLightning/pytorch-lightning/pull/6636),
 
+    [#6637](https://github.com/PyTorchLightning/pytorch-lightning/pull/6637),
+
 )
diff --git a/tests/metrics/test_remove_1-5_metrics.py b/tests/metrics/test_remove_1-5_metrics.py
index 7f90cffea047d..0cd25c54cc7aa 100644
--- a/tests/metrics/test_remove_1-5_metrics.py
+++ b/tests/metrics/test_remove_1-5_metrics.py
@@ -37,7 +37,8 @@
     R2Score,
     Recall,
     ROC,
-    StatScores, SSIM,
+    SSIM,
+    StatScores,
 )
 from pytorch_lightning.metrics.functional import (
     auc,
@@ -59,7 +60,8 @@
     r2score,
     recall,
     roc,
-    stat_scores, ssim,
+    ssim,
+    stat_scores,
 )
 from pytorch_lightning.metrics.functional.accuracy import accuracy
 from pytorch_lightning.metrics.functional.mean_relative_error import mean_relative_error

From 7c30ef2b0a768614beab8a197dcec82ca7877700 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 22 Mar 2021 18:39:40 +0100
Subject: [PATCH 5/6] .

---
 tests/metrics/test_remove_1-5_metrics.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/metrics/test_remove_1-5_metrics.py b/tests/metrics/test_remove_1-5_metrics.py
index 0cd25c54cc7aa..43dd330bcfcbe 100644
--- a/tests/metrics/test_remove_1-5_metrics.py
+++ b/tests/metrics/test_remove_1-5_metrics.py
@@ -328,4 +328,4 @@ def test_v1_5_metric_regress():
     ssim.warned = False
     with pytest.deprecated_call(match='It will be removed in v1.5.0'):
         res = ssim(preds, target)
-    assert torch.allclose(res, torch.tensor(0.9219), atol=1e-4)
\ No newline at end of file
+    assert torch.allclose(res, torch.tensor(0.9219), atol=1e-4)

From 543183fb34983c0feb6e0f14daa7895a5b8d56b6 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 22 Mar 2021 18:56:29 +0100
Subject: [PATCH 6/6] Apply suggestions from code review
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Carlos Mocholí
---
 pytorch_lightning/metrics/regression/r2score.py | 4 ++--
 pytorch_lightning/metrics/regression/ssim.py    | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/metrics/regression/r2score.py b/pytorch_lightning/metrics/regression/r2score.py
index 780cb3bde9f85..52621d6df7c28 100644
--- a/pytorch_lightning/metrics/regression/r2score.py
+++ b/pytorch_lightning/metrics/regression/r2score.py
@@ -32,8 +32,8 @@ def __init__(
         dist_sync_fn: Callable = None,
     ):
         """
-        This implementation refers to :class:`~torchmetrics.Accuracy`.
+        This implementation refers to :class:`~torchmetrics.R2Score`.
 
     .. deprecated::
-        Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0.
+        Use :class:`~torchmetrics.R2Score`. Will be removed in v1.5.0.
     """
diff --git a/pytorch_lightning/metrics/regression/ssim.py b/pytorch_lightning/metrics/regression/ssim.py
index 930f803ab8679..b290808c6fa5e 100644
--- a/pytorch_lightning/metrics/regression/ssim.py
+++ b/pytorch_lightning/metrics/regression/ssim.py
@@ -34,8 +34,8 @@ def __init__(
         process_group: Optional[Any] = None,
    ):
        """
-        This implementation refers to :class:`~torchmetrics.Accuracy`.
+        This implementation refers to :class:`~torchmetrics.SSIM`.
 
    .. deprecated::
-        Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0.
+        Use :class:`~torchmetrics.SSIM`. Will be removed in v1.5.0.
    """
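
For downstream users of `pytorch_lightning.metrics`, a minimal migration sketch, assuming
only that `torchmetrics` is installed (it is the target of every `@deprecated` forward in
this series). The expected values in the comments are the ones asserted by the deprecation
tests above; the SSIM result depends on random inputs and is only approximate:

    import torch
    from torchmetrics import PSNR  # was: from pytorch_lightning.metrics import PSNR
    from torchmetrics.functional import psnr, r2score, ssim

    # Functional API: call signatures are unchanged by the deprecation shims.
    preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
    target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
    print(psnr(preds, target))  # tensor(2.5527)

    target = torch.tensor([3, -0.5, 2, 7])
    preds = torch.tensor([2.5, 0.0, 2, 8])
    print(r2score(preds, target))  # tensor(0.9486)

    preds = torch.rand([16, 1, 16, 16])
    target = preds * 0.75
    print(ssim(preds, target))  # ~tensor(0.9219) for these random inputs

    # Class-based API: constructor arguments are unchanged as well.
    metric = PSNR()
    metric.update(preds, target)
    print(metric.compute())

Until v1.5.0 the deprecated `pytorch_lightning.metrics` imports keep working and only raise
a deprecation warning; from v1.5.0 on, only the `torchmetrics` paths remain.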