Skip to content

Commit

Permalink
[haproxy] Remove the `collate_status_tags_per_host…
Browse files Browse the repository at this point in the history
…` option

The newly introduced HAProxy `collate_status_tags_per_host` option does not
appropriately aggregate metrics when a backend is associated with
multiple services. Remove the option to fix the aggregation.
  • Loading branch information
yannmh committed Mar 2, 2016
1 parent a0d298d commit f6b7545
Showing 1 changed file with 7 additions and 42 deletions.
49 changes: 7 additions & 42 deletions checks.d/haproxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,17 +95,6 @@ def check(self, instance):
instance.get('count_status_by_service', True)
)

collate_status_tags_per_host = _is_affirmative(
instance.get('collate_status_tags_per_host', False)
)

if collate_status_tags_per_host and not collect_status_metrics_by_host:
self.log.warning(
u"Status tags collation (`collate_status_tags_per_host`) has no effect when status "
u"metrics collection per host (`collect_status_metrics_by_host`) is disabled."
)
collate_status_tags_per_host = False

tag_service_check_by_host = _is_affirmative(
instance.get('tag_service_check_by_host', False)
)
Expand All @@ -127,7 +116,6 @@ def check(self, instance):
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service,
collate_status_tags_per_host=collate_status_tags_per_host
)

def _fetch_data(self, url, username, password):
Expand All @@ -147,8 +135,7 @@ def _fetch_data(self, url, username, password):
def _process_data(self, data, collect_aggregates_only, process_events, url=None,
collect_status_metrics=False, collect_status_metrics_by_host=False,
tag_service_check_by_host=False, services_incl_filter=None,
services_excl_filter=None, count_status_by_service=True,
collate_status_tags_per_host=False):
services_excl_filter=None, count_status_by_service=True):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''

Expand Down Expand Up @@ -205,8 +192,7 @@ def _process_data(self, data, collect_aggregates_only, process_events, url=None,
self.hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service,
collate_status_tags_per_host=collate_status_tags_per_host
count_status_by_service=count_status_by_service
)

self._process_backend_hosts_metric(
Expand Down Expand Up @@ -319,7 +305,7 @@ def _process_backend_hosts_metric(self, hosts_statuses, services_incl_filter=Non

def _process_status_metric(self, hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=None, services_excl_filter=None,
count_status_by_service=True, collate_status_tags_per_host=False):
count_status_by_service=True):
agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})

# use a counter unless we have a unique tag set to gauge
Expand All @@ -345,16 +331,10 @@ def _process_status_metric(self, hosts_statuses, collect_status_metrics_by_host,
if collect_status_metrics_by_host:
tags.append('backend:%s' % hostname)

if collate_status_tags_per_host:
self._gauge_collated_statuses(
"haproxy.count_per_status",
count, status, tags, counter
)
else:
self._gauge_all_statuses(
"haproxy.count_per_status",
count, status, tags, counter
)
self._gauge_all_statuses(
"haproxy.count_per_status",
count, status, tags, counter
)

if 'up' in status or 'open' in status:
agg_statuses[service]['available'] += count
Expand Down Expand Up @@ -387,21 +367,6 @@ def _gauge_all_statuses(self, metric_name, count, status, tags, counter):
if state != status:
self.gauge(metric_name, 0, tags + ['status:%s' % state.replace(" ", "_")])

def _gauge_collated_statuses(self, metric_name, count, status, tags, counter):
    """Submit `metric_name` with `status` collapsed to its collated form.

    The raw HAProxy status is mapped through `Services.STATUS_MAP` to a
    coarse `up`/`down` value; the opposite collated state is zeroed out so
    each tag set always carries a complete status breakdown. If the status
    is not present in the map, fall back to the uncollated submission.
    """
    collated_status = Services.STATUS_MAP.get(status)
    if not collated_status:
        # Unknown status: collation is impossible, so log it and emit the
        # full per-status breakdown instead.
        self.log.warning("Unexpected status found %s", status)
        self._gauge_all_statuses(metric_name, count, status, tags, counter)
        return

    self.gauge(metric_name, count, tags + ['status:%s' % collated_status])

    # Zero the other collated state so dashboards see both series.
    for other_state in ['up', 'down']:
        if other_state != collated_status:
            self.gauge(metric_name, 0, tags + ['status:%s' % other_state])

def _process_metrics(self, data, url, services_incl_filter=None,
services_excl_filter=None):
"""
Expand Down

0 comments on commit f6b7545

Please sign in to comment.