diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 604fbd0..4197c01 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,11 @@
 Changelog
 =========
 
+HEAD (2015-04-16)
+-----------------
+
+* Added JSON reporting.
+
 2.4.1 (2015-03-16)
 ------------------
diff --git a/README.rst b/README.rst
index bcf07ce..6b7b32a 100644
--- a/README.rst
+++ b/README.rst
@@ -125,6 +125,10 @@ If you need to do some wrapping (like special setup), you can use it as a decora
                         Disable GC during benchmarks.
   --benchmark-skip      Skip running any benchmarks.
   --benchmark-only      Only run benchmarks.
+  --benchmark-name-length={short,full}
+                        Length of benchmark names in the report.
+  --benchmark-json-path=path
+                        Create a JSON report file at the given path.
 
 Setting per-test options:
@@ -202,6 +206,28 @@ pytest-benchmark[aspect]`):
         f = Foo()
         f.run()
 
+
+JSON report
+===========
+
+pytest-benchmark can produce a report of its activity as a JSON file.
+Just specify a location for the report using --benchmark-json-path. It is also recommended that you
+set --benchmark-name-length=full if you have a large test suite, as this guarantees unique names
+for all the tests.
+
+You can add extra information to the header of the report by adding a
+pytest_benchmark_add_extra_info hook to your conftest.py:
+
+.. sourcecode:: python
+
+    def pytest_benchmark_add_extra_info(headerDict):
+        headerDict['user'] = getpass.getuser()
+
+        head_sha = subprocess.check_output('git rev-parse HEAD', shell=True)
+
+        headerDict['revision'] = head_sha.strip()
+
+
 Documentation
 =============
diff --git a/src/pytest_benchmark/benchmark_hooks.py b/src/pytest_benchmark/benchmark_hooks.py
new file mode 100644
index 0000000..df95e9a
--- /dev/null
+++ b/src/pytest_benchmark/benchmark_hooks.py
@@ -0,0 +1,13 @@
+
+def pytest_benchmark_add_extra_info(headerDict):
+    """Called during JSON report preparation.
+
+    Extra information can be added to the report header, for example:
+
+        headerDict['user'] = getpass.getuser()
+
+        head_sha = subprocess.check_output('git rev-parse HEAD', shell=True)
+
+        headerDict['revision'] = head_sha.strip()
+    """
+    pass
diff --git a/src/pytest_benchmark/plugin.py b/src/pytest_benchmark/plugin.py
index b2f6935..cf810cf 100644
--- a/src/pytest_benchmark/plugin.py
+++ b/src/pytest_benchmark/plugin.py
@@ -3,11 +3,14 @@
 from collections import defaultdict
 from decimal import Decimal
 import argparse
+import datetime
 import gc
+import json
 import math
 import py
 import pytest
 import sys
+import socket
 import time
 
 from .compat import XRANGE, PY3
@@ -15,6 +18,8 @@ from .timers import compute_timer_precision
 from .timers import default_timer
 
+from . import benchmark_hooks
+
 
 class NameWrapper(object):
@@ -133,6 +138,19 @@ def pytest_addoption(parser):
         action="store_true", default=False, help="Only run benchmarks."
     )
+    group.addoption(
+        "--benchmark-name-length", choices=['short', 'full'],
+        default='short',
+        help="Length of benchmark names in the report."
+    )
+    group.addoption('--benchmark-json-path', action="store",
+                    dest="benchmark_json_path", metavar="path", default=None,
+                    help="Create a JSON report file at the given path.")
+
+
+def pytest_addhooks(pluginmanager):
+    '''Install hooks so users can add extra info to the JSON report header.'''
+    pluginmanager.addhooks(benchmark_hooks)
 
 
 class BenchmarkStats(RunningStats):
@@ -341,6 +359,8 @@ def pytest_terminal_summary(terminalreporter):
     if not benchmarksession._benchmarks:
         return
 
+    write_json(terminalreporter)
+
     timer = benchmarksession._options.get('timer')
 
     groups = defaultdict(list)
@@ -408,6 +428,59 @@ def pytest_terminal_summary(terminalreporter):
     tr.write_line("")
 
 
+def write_json(terminalreporter):
+    tr = terminalreporter
+    benchmarksession = tr.config._benchmarksession
+
+    if not benchmarksession._benchmarks:
+        return
+    if not tr.config.option.benchmark_json_path:
+        return
+
+    jsonData = {}
+
+    jsonData['header'] = {}
+
+    jsonData['header']['hostname'] = socket.gethostname()
+    jsonData['header']['report_datetime'] = datetime.datetime.utcnow().isoformat()
+
+    tr.config.hook.pytest_benchmark_add_extra_info(headerDict=jsonData['header'])
+
+
+    groups = defaultdict(list)
+    for bench in benchmarksession._benchmarks:
+        groups[bench.group].append(bench)
+
+
+    labels = {
+        "name": "Name",
+        "min": "Min",
+        "max": "Max",
+        "mean": "Mean",
+        "stddev": "StdDev",
+        "runs": "Rounds",
+        "scale": "Iterations",
+    }
+    allBenchmarks = {}
+    for group, benchmarks in sorted(groups.items(), key=lambda pair: pair[0] or ""):
+        if group is None:
+            group = 'default'
+        groupData = []
+        for benchmark in benchmarks:
+            tt = {jsonName: benchmark[prop] for prop, jsonName in labels.items()}
+            tt['status'] = 'passed'
+            allBenchmarks[tt['Name']] = tt
+            groupData.append(tt)
+        jsonData[group] = groupData
+
+    for status in ('error', 'failed'):
+        for rep in tr.getreports(status):
+            allBenchmarks[rep.nodeid]['status'] = status
+
+    with open(tr.config.option.benchmark_json_path, 'w') as f:
+        f.write(json.dumps(jsonData, indent=4))
+
+
 @pytest.fixture(scope="function")
 def benchmark(request):
     benchmarksession = request.config._benchmarksession
@@ -416,12 +489,18 @@ def benchmark(request):
         pytest.skip("Benchmarks are disabled.")
     else:
         node = request.node
+        name_length = request.config.getoption("benchmark_name_length")
+        if name_length == 'full':
+            name = node._nodeid
+        else:
+            name = node.name
+
         marker = node.get_marker("benchmark")
         options = marker.kwargs if marker else {}
        if 'timer' in options:
             options['timer'] = NameWrapper(options['timer'])
         benchmark = BenchmarkFixture(
-            node.name,
+            name,
             add_stats=benchmarksession._benchmarks.append,
             logger=DiagnosticLogger(
                 benchmarksession._verbose,
diff --git a/tests/test_help.t b/tests/test_help.t
index 9174710..dc0a53a 100644
--- a/tests/test_help.t
+++ b/tests/test_help.t
@@ -20,4 +20,7 @@
                         Disable GC during benchmarks.
   --benchmark-skip      Skip running any benchmarks.
   --benchmark-only      Only run benchmarks.
-  \s* (re)
+  --benchmark-name-length={short,full}
+                        Length of benchmark names in the report.
+  --benchmark-json-path=path
+                        Create a JSON report file at the given path.
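
For reference, the file written by write_json() above contains a "header" section (hostname, report_datetime, plus anything contributed via the pytest_benchmark_add_extra_info hook) and one key per benchmark group ("default" when no group is set), each holding a list of entries keyed by the labels mapping (Name, Min, Max, Mean, StdDev, Rounds, Iterations) plus a status field. A minimal consumer of such a report might look like the sketch below; it is not part of this diff and assumes the report was written with --benchmark-json-path=report.json:

.. sourcecode:: python

    from __future__ import print_function
    import json

    # Load the report produced by --benchmark-json-path (the report.json
    # path here is just an assumption for the example).
    with open("report.json") as f:
        report = json.load(f)

    header = report["header"]
    print("run on", header["hostname"], "at", header["report_datetime"])

    # Every top-level key other than "header" is a benchmark group.
    for group, benchmarks in sorted(report.items()):
        if group == "header":
            continue
        for bench in benchmarks:
            print(group, bench["Name"], "mean:", bench["Mean"],
                  "status:", bench["status"])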