Commit: some docstring and name fix
sshuair committed Apr 30, 2021
1 parent 38709ff commit df21986
Showing 3 changed files with 24 additions and 37 deletions.
38 changes: 19 additions & 19 deletions mmseg/core/evaluation/metrics.py
@@ -153,11 +153,11 @@ def mean_iou(results,
    Returns:
        dict[str, float | ndarray]:
-            <aAcc> float: Overall accuracy on all images.
-            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
-            <IoU> ndarray: Per category IoU, shape (num_classes, ).
+            <aAcc> float: Overall accuracy on all images.
+            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
+            <IoU> ndarray: Per category IoU, shape (num_classes, ).
    """
-    mIoU_result = eval_metrics(
+    iou_result = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
@@ -166,7 +166,7 @@ def mean_iou(results,
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
-    return mIoU_result
+    return iou_result


def mean_dice(results,
@@ -192,12 +192,12 @@ def mean_dice(results,
    Returns:
        dict[str, float | ndarray]: Default metrics.
-            <aAcc> float: Overall accuracy on all images.
-            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
-            <Dice> ndarray: Per category dice, shape (num_classes, ).
+            <aAcc> float: Overall accuracy on all images.
+            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
+            <Dice> ndarray: Per category dice, shape (num_classes, ).
    """

-    mDice_result = eval_metrics(
+    dice_result = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
@@ -206,7 +206,7 @@ def mean_dice(results,
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
-    return mDice_result
+    return dice_result


def mean_fscore(results,
@@ -236,12 +236,12 @@ def mean_fscore(results,
    Returns:
        dict[str, float | ndarray]: Default metrics.
-            <aAcc> float: Overall accuracy on all images.
-            <Fscore> ndarray: Per category recall, shape (num_classes, ).
-            <Precision> ndarray: Per category precision, shape (num_classes, ).
-            <Recall> ndarray: Per category f-score, shape (num_classes, ).
+            <aAcc> float: Overall accuracy on all images.
+            <Fscore> ndarray: Per category recall, shape (num_classes, ).
+            <Precision> ndarray: Per category precision, shape (num_classes, ).
+            <Recall> ndarray: Per category f-score, shape (num_classes, ).
    """
-    mFscore_result = eval_metrics(
+    fscore_result = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
@@ -251,7 +251,7 @@ def mean_fscore(results,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label,
        beta=beta)
-    return mFscore_result
+    return fscore_result


def eval_metrics(results,
@@ -277,9 +277,9 @@ def eval_metrics(results,
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Wether ignore zero label. Default: False.
    Returns:
-        float: Overall accuracy on all images.
-        ndarray: Per category accuracy, shape (num_classes, ).
-        ndarray: Per category evaluation metrics, shape (num_classes, ).
+        float: Overall accuracy on all images.
+        ndarray: Per category accuracy, shape (num_classes, ).
+        ndarray: Per category evaluation metrics, shape (num_classes, ).
    """
    if isinstance(metrics, str):
        metrics = [metrics]
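For context, the three wrappers above only rename their local result variables; each still returns the dict produced by eval_metrics. A minimal usage sketch, illustrative only and not part of this commit, with made-up 2x2 synthetic maps and the mean_iou signature shown in this diff:

import numpy as np

from mmseg.core.evaluation import mean_iou

# Tiny synthetic prediction / ground-truth maps with 3 classes (illustrative values).
results = [np.array([[0, 1], [2, 1]])]
gt_seg_maps = [np.array([[0, 1], [2, 2]])]

ret = mean_iou(results, gt_seg_maps, num_classes=3, ignore_index=255)
print(ret['aAcc'])  # overall accuracy, float
print(ret['Acc'])   # per-category accuracy, shape (num_classes, )
print(ret['IoU'])   # per-category IoU, shape (num_classes, )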
5 changes: 4 additions & 1 deletion mmseg/datasets/custom.py
@@ -370,7 +370,10 @@ def evaluate(self,

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
-            summary_table_data.add_column(key, [val])
+            if key == 'aAcc':
+                summary_table_data.add_column(key, [val])
+            else:
+                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
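The effect of this branch is purely cosmetic: summary columns other than aAcc get an 'm' prefix (mIoU, mAcc, ...) in the printed table, while aAcc keeps its name. A standalone sketch of the resulting PrettyTable output, using made-up summary numbers:

from prettytable import PrettyTable

# Hypothetical averaged metrics, as produced upstream of this loop.
ret_metrics_summary = {'aAcc': 90.1, 'IoU': 75.4, 'Acc': 84.2}

summary_table_data = PrettyTable()
for key, val in ret_metrics_summary.items():
    if key == 'aAcc':
        summary_table_data.add_column(key, [val])
    else:
        summary_table_data.add_column('m' + key, [val])

print(summary_table_data.get_string())
# +------+------+------+
# | aAcc | mIoU | mAcc |
# +------+------+------+
# | 90.1 | 75.4 | 84.2 |
# +------+------+------+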
18 changes: 1 addition & 17 deletions tests/test_metrics.py
@@ -2,23 +2,7 @@

from mmseg.core.evaluation import (eval_metrics, mean_dice, mean_fscore,
                                   mean_iou)
-
-
-def f_score(precision, recall, beta=1):
-    """calcuate the f-score value.
-    Args:
-        precision (float | torch.Tensor): The precision value.
-        recall (float | torch.Tensor): The recall value.
-        beta (int): Determines the weight of recall in the combined score.
-            Default: False.
-    Returns:
-        [torch.tensor]: The f-score value.
-    """
-    score = (1 + beta**2) * (precision * recall) / (
-        (beta**2 * precision) + recall)
-    return score
+from mmseg.core.evaluation.metrics import f_score


def get_confusion_matrix(pred_label, label, num_classes, ignore_index):
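The deleted helper and the imported f_score are meant to compute the same weighted harmonic mean, F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall). A quick sanity check with made-up values, assuming the library function keeps the signature of the removed test helper:

from mmseg.core.evaluation.metrics import f_score

precision, recall = 0.5, 1.0
# (1 + 1) * 0.5 * 1.0 / (1 * 0.5 + 1.0) = 1.0 / 1.5 ~= 0.667
print(f_score(precision, recall, beta=1))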
