Permit empty standings precedence and warn user accordingly (#1149)
* Make standings generation more robust #1108

This rewrites metricgetter() to avoid using Python's itemgetter(),
so that the function it returns always returns a tuple. This
simplifies a lot of handling, because it avoids the special case
where itemgetter, called with a single argument, returns a bare
item rather than a tuple of length 1.
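
(For reference, this is the standard-library behaviour the paragraph describes; a minimal sketch, not part of this commit:)

    from operator import itemgetter

    itemgetter('a')({'a': 1})               # 1, a bare value rather than a 1-tuple
    itemgetter('a', 'b')({'a': 1, 'b': 2})  # (1, 2), a tuple only with two or more keys
    itemgetter()                            # TypeError: no arguments isn't allowed at all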

As a consequence, the case where no metrics are specified no longer
crashes. Also added a standings test for this case.

* Fix bug where extra metrics cause standings to crash

Also added a unit test for this scenario

* Add user alerts for blank standings precedence #1108
czlee authored and philipbelesky committed Jul 29, 2019
1 parent bb28342 commit 82cd59e
Showing 13 changed files with 175 additions and 68 deletions.
14 changes: 14 additions & 0 deletions tabbycat/availability/templates/availability_index.html
@@ -194,6 +194,20 @@
 </div>
 {% endif %}
 
+{% if pref.team_standings_precedence|length == 0 %}
+{% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
+{% blocktrans trimmed asvar message %}
+The team standings precedence is empty. This means that teams aren't
+ranked according to any metrics, so all teams will be in a single bracket
+containing everyone. If this isn't what you intended, set the team
+standings precedence in the
+<a href="{{ standings_config_url }}" class="alert-link">Standings section
+of this tournament's configuration</a> before creating the draw. In most
+tournaments, the first metric should be points or wins.
+{% endblocktrans %}
+{% include "components/alert.html" with type="warning" icon="alert-circle" %}
+{% endif %}
+
 {% endblock %}
 
 {% block content %}
2 changes: 1 addition & 1 deletion tabbycat/draw/manager.py
@@ -197,7 +197,7 @@ def get_teams(self):
         ranked = []
         for standing in standings:
             team = standing.team
-            team.points = next(standing.itermetrics())
+            team.points = next(standing.itermetrics(), 0)
             ranked.append(team)
 
         return ranked
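
The second argument to next() is what keeps this from raising StopIteration when the precedence is empty; a minimal illustration (values invented, not repository code):

    next(iter([2, 1]))    # 2
    next(iter([]), 0)     # 0, returned instead of raising StopIteration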
72 changes: 46 additions & 26 deletions tabbycat/draw/tables.py
@@ -266,21 +266,29 @@ def build(self, draw, teams, side_histories_before, side_histories_now, standing
         self.add_team_columns(teams)
 
         # First metric
-        metric_info = next(self.standings.metrics_info())
-        header = {
-            'key': "pts",  # always use 'pts' to make it more predictable
-            'title': force_text(metric_info['abbr']),
-            'tooltip': force_text(metric_info['name']),
-        }
-        infos = self.standings.get_standings(teams)
-        cells = []
-        for info in infos:
-            points = info.metrics[metric_info['key']]
-            cells.append({
-                'text': metricformat(points),
-                'sort': points,
-            })
-        self.add_column(header, cells)
+        if len(self.standings.metric_keys) == 0:  # special case: no metrics used
+            header = {
+                'key': "pts",
+                'title': "?",
+                'tooltip': _("No metrics in the team standings precedence"),
+            }
+            self.add_column(header, [0] * len(teams))
+        else:
+            metric_info = next(self.standings.metrics_info())
+            header = {
+                'key': "pts",  # always use 'pts' to make it more predictable
+                'title': force_text(metric_info['abbr']),
+                'tooltip': force_text(metric_info['name']),
+            }
+            cells = []
+            infos = self.standings.get_standings(teams)
+            for info in infos:
+                points = info.metrics[metric_info['key']]
+                cells.append({
+                    'text': metricformat(points),
+                    'sort': points,
+                })
+            self.add_column(header, cells)
 
         # Sides
         sides_lookup = {dt.team_id: dt.side for debate in draw
@@ -347,9 +355,13 @@ def build(self, debates, teams, side_histories_before, side_histories_now, stand
         self.highlight_rows_by_column_value(column=0)  # highlight first row of a new bracket
 
     def add_permitted_points_column(self):
-        first_metric = self.standings.metric_keys[0]
-        points = [info.metrics[first_metric] for info in self.standings]
-        points.sort(reverse=True)
+        if len(self.standings.metric_keys) == 0:  # special case: no metrics used
+            points = [0] * len(self.standings)
+        else:
+            first_metric = self.standings.metric_keys[0]
+            points = [info.metrics[first_metric] for info in self.standings]
+            points.sort(reverse=True)
+
         pref = self.tournament.pref('bp_pullup_distribution')
         define_rooms_func = getattr(BPHungarianDrawGenerator, BPHungarianDrawGenerator.DEFINE_ROOM_FUNCTIONS[pref])
         rooms = define_rooms_func(points)
@@ -377,14 +389,22 @@ def add_all_columns_for_team(self, side):
                 row[-1]['class'] = 'highlight-col'
 
         # Points of team
-        metric_info = next(self.standings.metrics_info())
-        header = {
-            'key': "pts" + side_abbr,  # always use 'pts' to make it more predictable
-            'tooltip': _("%(team)s: %(metric)s") % {'team': side_abbr, 'metric': metric_info['name']},
-            'icon': 'star'
-        }
-        infos = self.standings.get_standings(teams)
-        self.add_column(header, [metricformat(info.metrics[metric_info['key']]) for info in infos])
+        if len(self.standings.metric_keys) == 0:  # special case: no metrics used
+            header = {
+                'key': "pts" + side_abbr,
+                'tooltip': _("No metrics in the team standings precedence"),
+                'icon': 'star'
+            }
+            self.add_column(header, [0] * len(teams))
+        else:
+            metric_info = next(self.standings.metrics_info())
+            header = {
+                'key': "pts" + side_abbr,  # always use 'pts' to make it more predictable
+                'tooltip': _("%(team)s: %(metric)s") % {'team': side_abbr, 'metric': metric_info['name']},
+                'icon': 'star'
+            }
+            infos = self.standings.get_standings(teams)
+            self.add_column(header, [metricformat(info.metrics[metric_info['key']]) for info in infos])
 
         # Side history after last round
         header = self._prepend_side_header(side, _("side history before this round"), _("Sides"), text_only=True)
18 changes: 17 additions & 1 deletion tabbycat/draw/templates/draw_status_draft.html
@@ -20,7 +20,23 @@
 {% endblock %}
 
 {% block page-alerts %}
-{% if pref.teams_in_debate == 'bp' and pref.team_standings_precedence.0 != 'points' %}
+{% if pref.team_standings_precedence|length == 0 %}
+{% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
+{% blocktrans trimmed asvar message %}
+The team standings precedence is empty. This means that teams aren't
+ranked according to any metrics, so all teams are in a single bracket
+containing everyone. If this isn't what you intended, set the team
+standings precedence in the
+<a href="{{ standings_config_url }}" class="alert-link">Standings section
+of this tournament's configuration</a>, then delete and recreate the draw.
+In most tournaments, the first metric should be points or wins.
+{% endblocktrans %}
+{# Now just 'info' because the user already clicked through the 'warning' when creating the draw. #}
+{% include "components/alert.html" with type="info" icon="alert-circle" %}
+{% endif %}
+
+{% if pref.teams_in_debate == 'bp' and pref.team_standings_precedence|length > 0 and pref.team_standings_precedence.0 != 'points' %}
 {% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
 {% blocktrans trimmed asvar message with metric=pref.team_standings_precedence.0|teammetricname %}
 Brackets are formed using the first metric in the team standings precedence, which is currently
22 changes: 5 additions & 17 deletions tabbycat/standings/base.py
@@ -1,12 +1,11 @@
 """Base class for standings generators."""
 
-from operator import itemgetter
 import random
 import logging
 
 from django.utils.translation import gettext as _
 
-from .metrics import RepeatedMetricAnnotator
+from .metrics import metricgetter, RepeatedMetricAnnotator
 
 logger = logging.getLogger(__name__)
 
@@ -140,7 +139,7 @@ def __init__(self, instances, rank_filter=None):
         self._rank_limit = None
 
         self.metric_keys = list()
-        self.metric_ascending = list()
+        self.metric_ascending = dict()
         self.ranking_keys = list()
         self._metric_specs = list()
         self._ranking_specs = list()
@@ -209,7 +208,7 @@ def get_standings(self, instances):
 
     def record_added_metric(self, key, name, abbr, icon, ascending):
         self.metric_keys.append(key)
-        self.metric_ascending.append(ascending)
+        self.metric_ascending[key] = ascending
         self._metric_specs.append((key, name, abbr, icon))
 
     def record_added_ranking(self, key, name, abbr, icon):
@@ -226,19 +225,8 @@ def sort(self, precedence, tiebreak_func=None):
         if tiebreak_func:
             tiebreak_func(self._standings)
 
-        metricitemgetter = itemgetter(*precedence)
-
-        # Like standings.metrics.metricgetter, but negates metrics ranked in ascending order
-        if len(precedence) == 1 and self.metric_ascending[0]:
-            def metrics_for_ranking(info):
-                return -metricitemgetter(info.metrics)
-        elif len(precedence) == 1 and not self.metric_ascending[0]:
-            def metrics_for_ranking(info):
-                return metricitemgetter(info.metrics)
-        else:
-            def metrics_for_ranking(info):
-                metrics = metricitemgetter(info.metrics)
-                return tuple(-x if asc else x for x, asc in zip(metrics, self.metric_ascending))
+        ascending = [self.metric_ascending[key] for key in precedence]
+        metrics_for_ranking = metricgetter(precedence, ascending)
 
         try:
             self._standings.sort(key=metrics_for_ranking, reverse=True)
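
A quick sanity check on the negation trick (invented values, not repository code): sorting with reverse=True is descending on every element of the key tuple, so metrics that should rank ascending are negated first.

    rows = [(3, 1), (3, 0), (2, 2)]      # (points, losses): points descending, losses ascending
    key = lambda r: (r[0], -r[1])        # negate the ascending metric
    sorted(rows, key=key, reverse=True)  # [(3, 0), (3, 1), (2, 2)]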
31 changes: 20 additions & 11 deletions tabbycat/standings/metrics.py
@@ -8,23 +8,32 @@
 
 import logging
 
-from operator import itemgetter
-
 logger = logging.getLogger(__name__)
 
 
-def metricgetter(*items):
-    """Returns a callable object that fetches `item` from its operand's
-    `metrics` attribute. If multiple items are specified, returns a tuple.
+def metricgetter(items, negate=None):
+    """Returns a callable object that fetches each item in `items` from its
+    operand's `metrics` attribute, and returns a tuple containing the results.
+    The tuple will have the same number of elements as `items`.
     For example:
-    - After `f = metricgetter("a")`, the call `f(x)` returns `x.metrics["a"]`.
-    - After `g = metricgetter(4, 9)`, the call `g(x)` returns `(x.metrics[4], x.metrics[9])`.
+    - After `f = metricgetter(("a",))`, the call `f(x)` returns `(x.metrics["a"],)`.
+    - After `g = metricgetter((4, 9))`, the call `g(x)` returns `(x.metrics[4], x.metrics[9])`.
    """
-    if len(items) == 0:  # itemgetter() fails with no arguments
-        return lambda x: ()
-
-    metricitemgetter = itemgetter(*items)
-    return lambda x: metricitemgetter(x.metrics)
+    if negate is None:
+
+        def metricitemgetter(x):
+            return tuple(x.metrics[item] for item in items)
+
+    else:
+        assert len(items) == len(negate), "items had %d items but negate had %d" % (len(items), len(negate))
+        coeffs = [-1 if neg else 1 for neg in negate]
+
+        def metricitemgetter(x):
+            return tuple(coeff * x.metrics[item] for (coeff, item) in zip(coeffs, items))
+
+    return metricitemgetter
 
 
 class BaseMetricAnnotator:
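
To illustrate the new signature (a sketch, not repository code; the SimpleNamespace stands in for a standings info object, which only needs a `metrics` mapping):

    from types import SimpleNamespace

    info = SimpleNamespace(metrics={'points': 3, 'speaks': 210.5})
    metricgetter(('points',))(info)                          # (3,)
    metricgetter(('points', 'speaks'), (False, True))(info)  # (3, -210.5), 'speaks' negated
    metricgetter(())(info)                                   # (), empty precedence no longer crashes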
10 changes: 5 additions & 5 deletions tabbycat/standings/ranking.py
@@ -58,7 +58,7 @@ class BasicRankAnnotator(BaseRankAnnotator):
     icon = "bar-chart"
 
     def __init__(self, metrics):
-        self.rank_key = metricgetter(*metrics)
+        self.rank_key = metricgetter(metrics)
 
     def annotate(self, standings):
         rank = 1
@@ -93,8 +93,8 @@ class SubrankAnnotator(BaseRankWithinGroupAnnotator):
     abbr = "Sub"
 
     def __init__(self, metrics):
-        self.group_key = metricgetter(metrics[0])
-        self.rank_key = metricgetter(*metrics[1:])
+        self.group_key = metricgetter(metrics[:1])  # don't crash if there are no metrics
+        self.rank_key = metricgetter(metrics[1:])
 
 
 class DivisionRankAnnotator(BaseRankWithinGroupAnnotator):
@@ -104,7 +104,7 @@ class DivisionRankAnnotator(BaseRankWithinGroupAnnotator):
     abbr = "Div"
 
     def __init__(self, metrics):
-        self.rank_key = metricgetter(*metrics)
+        self.rank_key = metricgetter(metrics)
 
     @staticmethod
     def group_key(tsi):
@@ -118,7 +118,7 @@ class RankFromInstitutionAnnotator(BaseRankWithinGroupAnnotator):
     abbr = "Inst"
 
     def __init__(self, metrics):
-        self.rank_key = metricgetter(*metrics)
+        self.rank_key = metricgetter(metrics)
 
     @staticmethod
     def group_key(tsi):
4 changes: 2 additions & 2 deletions tabbycat/standings/teams.py
@@ -318,7 +318,7 @@ def get_team_scores(self, key, equal_teams, tsi, round):
         return ts
 
     def annotate(self, queryset, standings, round=None):
-        key = metricgetter(*self.keys)
+        key = metricgetter(self.keys)
 
         def who_beat_whom(tsi):
             equal_teams = [x for x in standings.infoview() if key(x) == key(tsi)]
@@ -343,7 +343,7 @@ class DivisionsWhoBeatWhomMetricAnnotator(WhoBeatWhomMetricAnnotator):
     choice_name = _("who-beat-whom (in divisions)")
 
     def annotate(self, queryset, standings, round=None):
-        key = metricgetter(*self.keys)
+        key = metricgetter(self.keys)
 
         def who_beat_whom_divisions(tsi):
             equal_teams = [x for x in standings.infoview() if key(x) == key(tsi) and x.team.division == tsi.team.division]
17 changes: 16 additions & 1 deletion tabbycat/standings/templates/speaker_standings.html
@@ -1,8 +1,24 @@
 {% extends "standings_table.html" %}
 {% load debate_tags i18n %}
 
+{# Note: This template is only used for admins (not public views of the tab). #}
+
 {% block page-alerts %}
 
+{% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
+
+{% if pref.speaker_standings_precedence|length == 0 %}
+{% blocktrans trimmed asvar message %}
+The speaker standings precedence is empty. This means that speakers aren't
+ranked according to any metrics, so everyone is first equal. If this isn't
+what you intended, set the speaker standings precedence in the
+<a href="{{ standings_config_url }}" class="alert-link">Standings section
+of this tournament's configuration</a>. In most tournaments, the first
+metric should be total or average.
+{% endblocktrans %}
+{% include "components/alert.html" with type="warning" icon="alert-circle" %}
+{% endif %}
+
 {% if pref.speaker_standings_precedence.0 == 'average' %}
 {% trans "Speakers are ranked by their <strong>average score</strong>." as p1 %}
 {% elif pref.speaker_standings_precedence.0 == 'total' %}
@@ -21,7 +37,6 @@
 {% trans "All speakers appear in these standings, no matter how many debates they've missed." as p2 %}
 {% endif %}
 
-{% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
 {% blocktrans trimmed asvar p3 %}
 These settings can be changed in the
 <a href="{{ standings_config_url }}" class="alert-link">Standings section
21 changes: 21 additions & 0 deletions tabbycat/standings/templates/team_standings.html
@@ -0,0 +1,21 @@
+{% extends "standings_table.html" %}
+{% load debate_tags i18n %}
+
+{# Note: This template is only used for admins (not public views of the tab). #}
+
+{% block page-alerts %}
+
+{% if pref.team_standings_precedence|length == 0 %}
+{% tournamenturl 'options-tournament-section' section='standings' as standings_config_url %}
+{% blocktrans trimmed asvar message %}
+The team standings precedence is empty. This means that teams aren't
+ranked according to any metrics, so everyone is first equal. If this isn't
+what you intended, set the team standings precedence in the
+<a href="{{ standings_config_url }}" class="alert-link">Standings section
+of this tournament's configuration</a>. In most tournaments, the first
+metric should be points or wins.
+{% endblocktrans %}
+{% include "components/alert.html" with type="warning" icon="alert-circle" %}
+{% endif %}
+
+{% endblock %}
25 changes: 24 additions & 1 deletion tabbycat/standings/tests/test_standings.py
@@ -46,7 +46,28 @@ def tearDown(self):
         DebateTeam.objects.filter(team__tournament=self.tournament).delete()
         self.tournament.delete()
 
-    def test_points(self):
+    def test_nothing(self):
+        # just test that it does not crash
+        generator = TeamStandingsGenerator((), ())
+        generator.generate(self.tournament.team_set.all())
+
+    def test_no_metrics(self):
+        generator = TeamStandingsGenerator((), ('rank', 'subrank'))
+        standings = generator.generate(self.tournament.team_set.all())
+        self.assertEqual(standings.get_standing(self.team1).rankings['rank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team2).rankings['rank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team1).rankings['subrank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team2).rankings['subrank'], (1, True))
+
+    def test_only_extra_metrics(self):
+        generator = TeamStandingsGenerator((), ('rank', 'subrank'), extra_metrics=('points',))
+        standings = generator.generate(self.tournament.team_set.all())
+        self.assertEqual(standings.get_standing(self.team1).rankings['rank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team2).rankings['rank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team1).rankings['subrank'], (1, True))
+        self.assertEqual(standings.get_standing(self.team2).rankings['subrank'], (1, True))
+
+    def test_no_rankings(self):
         generator = TeamStandingsGenerator(('points',), ())
         standings = generator.generate(self.tournament.team_set.all())
         self.assertEqual(standings.get_standing(self.team1).metrics['points'], 2)
@@ -156,6 +177,8 @@ def test_points_speaks_subrank(self):
         self.assertEqual(standings.get_standing(self.team2).metrics['points'], 0)
         self.assertEqual(standings.get_standing(self.team1).metrics['speaks_sum'], 203)
         self.assertEqual(standings.get_standing(self.team2).metrics['speaks_sum'], 197)
+        self.assertEqual(standings.get_standing(self.team1).rankings['rank'], (1, False))
+        self.assertEqual(standings.get_standing(self.team2).rankings['rank'], (2, False))
         self.assertEqual(standings.get_standing(self.team1).rankings['subrank'], (1, False))
         self.assertEqual(standings.get_standing(self.team2).rankings['subrank'], (1, False))
 