Added json output options #8

Closed
wants to merge 7 commits into from
5 changes: 5 additions & 0 deletions CHANGELOG.rst
@@ -2,6 +2,11 @@
Changelog
=========

HEAD (2015-04-16)
-----------------

* Added JSON reporting

2.4.1 (2015-03-16)
------------------

26 changes: 26 additions & 0 deletions README.rst
@@ -125,6 +125,10 @@ If you need to do some wrapping (like special setup), you can use it as a decorator
Disable GC during benchmarks.
--benchmark-skip Skip running any benchmarks.
--benchmark-only Only run benchmarks.
--benchmark-name-length={short,full}
length of name in report
--benchmark-json-path=path
create json report file at given path.


Setting per-test options:
@@ -202,6 +206,28 @@ pytest-benchmark[aspect]`):
        f = Foo()
        f.run()


JSON report
===========

pytest-benchmark can produce a report of benchmark activity as a JSON file.
Just specify a location for the report with ``--benchmark-json-path``. If you have a
large test suite, it's also recommended to set ``--benchmark-name-length=full``, which
guarantees unique names for all the tests.

You can add extra information to the header of the report by adding the
``pytest_benchmark_add_extra_info`` hook to your ``conftest.py``:

.. sourcecode:: python

    import getpass
    import subprocess

    def pytest_benchmark_add_extra_info(headerDict):
        headerDict['user'] = getpass.getuser()

        head_sha = subprocess.check_output('git rev-parse HEAD', shell=True)

        headerDict['revision'] = head_sha.strip()
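
The resulting file is plain JSON: a ``header`` section plus one key per benchmark
group (benchmarks without a group are stored under ``default``). A minimal sketch of
reading the report back, assuming it was written to ``benchmarks.json``:

.. sourcecode:: python

    import json

    # e.g. produced by:
    #   pytest --benchmark-json-path=benchmarks.json --benchmark-name-length=full
    with open('benchmarks.json') as f:
        report = json.load(f)

    # the header carries hostname, report_datetime and any hook-added fields
    print(report['header']['hostname'])

    # each entry has Name, Min, Max, Mean, StdDev, Rounds, Iterations and a
    # status of 'passed', 'failed' or 'error'
    for bench in report['default']:
        print('%s mean=%s status=%s' % (bench['Name'], bench['Mean'], bench['status']))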


Documentation
=============

13 changes: 13 additions & 0 deletions src/pytest_benchmark/benchmark_hooks.py
@@ -0,0 +1,13 @@

def pytest_benchmark_add_extra_info(headerDict):
    """ called during json report preparation.

    Extra information can be added to the report header, e.g.:

        headerDict['user'] = getpass.getuser()

        head_sha = subprocess.check_output('git rev-parse HEAD', shell=True)

        headerDict['revision'] = head_sha.strip()
    """
    pass
81 changes: 80 additions & 1 deletion src/pytest_benchmark/plugin.py
@@ -3,18 +3,23 @@
from collections import defaultdict
from decimal import Decimal
import argparse
import datetime
import gc
import json
import math
import py
import pytest
import sys
import socket
import time

from .compat import XRANGE, PY3
from .stats import RunningStats
from .timers import compute_timer_precision
from .timers import default_timer

from . import benchmark_hooks


class NameWrapper(object):

@@ -133,6 +138,19 @@ def pytest_addoption(parser):
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-name-length", choices=['short', 'full'],
        default='short',
        help="length of name in report"
    )
    group.addoption('--benchmark-json-path', action="store",
                    dest="benchmark_json_path", metavar="path", default=None,
                    help="create json report file at given path.")


def pytest_addhooks(pluginmanager):
    ''' install hooks so users can add extra info to the json report header '''
    pluginmanager.addhooks(benchmark_hooks)


class BenchmarkStats(RunningStats):
Expand Down Expand Up @@ -341,6 +359,8 @@ def pytest_terminal_summary(terminalreporter):
if not benchmarksession._benchmarks:
return

write_json(terminalreporter)

timer = benchmarksession._options.get('timer')

groups = defaultdict(list)
Expand Down Expand Up @@ -408,6 +428,59 @@ def pytest_terminal_summary(terminalreporter):
tr.write_line("")


def write_json(terminalreporter):
    tr = terminalreporter
    benchmarksession = tr.config._benchmarksession

    if not benchmarksession._benchmarks:
        return
    if not tr.config.option.benchmark_json_path:
        return

    jsonData = {}

    jsonData['header'] = {}

    jsonData['header']['hostname'] = socket.gethostname()
    jsonData['header']['report_datetime'] = datetime.datetime.utcnow().isoformat()

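    # conftest.py implementations of the hook can add extra fields
    # (e.g. user, revision) to the header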
    tr.config.hook.pytest_benchmark_add_extra_info(headerDict=jsonData['header'])

    groups = defaultdict(list)
    for bench in benchmarksession._benchmarks:
        groups[bench.group].append(bench)
    labels = {
        "name": "Name",
        "min": "Min",
        "max": "Max",
        "mean": "Mean",
        "stddev": "StdDev",
        "runs": "Rounds",
        "scale": "Iterations",
    }
    allBenchmarks = {}
    for group, benchmarks in sorted(groups.items(), key=lambda pair: pair[0] or ""):
        if group is None:
            group = 'default'
        groupData = []
        for benchmark in benchmarks:
            tt = {jsonName: benchmark[prop] for prop, jsonName in labels.items()}
            tt['status'] = 'passed'
            allBenchmarks[tt['Name']] = tt
            groupData.append(tt)
        jsonData[group] = groupData

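    # mark the entries whose tests errored or failed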
    for status in ('error', 'failed'):
        for rep in tr.getreports(status):
            if rep.nodeid in allBenchmarks:
                allBenchmarks[rep.nodeid]['status'] = status

    with open(tr.config.option.benchmark_json_path, 'w') as f:
        f.write(json.dumps(jsonData, indent=4))


@pytest.fixture(scope="function")
def benchmark(request):
    benchmarksession = request.config._benchmarksession
@@ -416,12 +489,18 @@ def benchmark(request):
        pytest.skip("Benchmarks are disabled.")
    else:
        node = request.node
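        # 'full' uses the whole node id, so names stay unique even when
        # test function names repeat across modules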
        NameLength = request.config.getoption("benchmark_name_length")
        if NameLength == 'full':
            name = node._nodeid
        else:
            name = node.name

        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if 'timer' in options:
            options['timer'] = NameWrapper(options['timer'])
        benchmark = BenchmarkFixture(
            node.name,
            name,
            add_stats=benchmarksession._benchmarks.append,
            logger=DiagnosticLogger(
                benchmarksession._verbose,
5 changes: 4 additions & 1 deletion tests/test_help.t
@@ -20,4 +20,7 @@
Disable GC during benchmarks.
--benchmark-skip Skip running any benchmarks.
--benchmark-only Only run benchmarks.
\s* (re)
--benchmark-name-length={short,full}
length of name in report
--benchmark-json-path=path
create json report file at given path.