From a6aa7432ae3f5543befdae1b7c1a1ad731591209 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Tue, 7 Apr 2015 11:52:53 +1000 Subject: [PATCH 1/7] Added option to use full test name in report --- src/pytest_benchmark/plugin.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/pytest_benchmark/plugin.py b/src/pytest_benchmark/plugin.py index b2f6935..a5a12bb 100644 --- a/src/pytest_benchmark/plugin.py +++ b/src/pytest_benchmark/plugin.py @@ -133,6 +133,11 @@ def pytest_addoption(parser): action="store_true", default=False, help="Only run benchmarks." ) + group.addoption( + "--benchmark-name-length", choices=['short', 'full'], + default='short', + help="length of name in report" + ) class BenchmarkStats(RunningStats): @@ -416,12 +421,18 @@ def benchmark(request): pytest.skip("Benchmarks are disabled.") else: node = request.node + NameLength = request.config.getoption("benchmark_name_length") + if NameLength == 'full': + name = node._nodeid + else: + name = node.name + marker = node.get_marker("benchmark") options = marker.kwargs if marker else {} if 'timer' in options: options['timer'] = NameWrapper(options['timer']) benchmark = BenchmarkFixture( - node.name, + name, add_stats=benchmarksession._benchmarks.append, logger=DiagnosticLogger( benchmarksession._verbose, From 70d65565a61c5fbcde27cbb77b167c341fcce5aa Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Mon, 13 Apr 2015 15:26:04 +1000 Subject: [PATCH 2/7] Added a json reporter --- src/pytest_benchmark/newhooks.py | 3 ++ src/pytest_benchmark/plugin.py | 65 ++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 src/pytest_benchmark/newhooks.py diff --git a/src/pytest_benchmark/newhooks.py b/src/pytest_benchmark/newhooks.py new file mode 100644 index 0000000..ab1317f --- /dev/null +++ b/src/pytest_benchmark/newhooks.py @@ -0,0 +1,3 @@ + +def pytest_benchmark_add_extra_info(headerDict): + pass diff --git a/src/pytest_benchmark/plugin.py 
b/src/pytest_benchmark/plugin.py index a5a12bb..8d4f503 100644 --- a/src/pytest_benchmark/plugin.py +++ b/src/pytest_benchmark/plugin.py @@ -4,10 +4,12 @@ from decimal import Decimal import argparse import gc +import json import math import py import pytest import sys +import socket import time from .compat import XRANGE, PY3 @@ -15,6 +17,8 @@ from .timers import compute_timer_precision from .timers import default_timer +import newhooks + class NameWrapper(object): @@ -138,6 +142,13 @@ def pytest_addoption(parser): default='short', help="length of name in report" ) + group.addoption('--benchmark-json-path', action="store", + dest="benchmark_json_path", metavar="path", default=None, + help="create json report file at given path.") + + +def pytest_addhooks(pluginmanager): + pluginmanager.addhooks(newhooks) class BenchmarkStats(RunningStats): @@ -346,6 +357,8 @@ def pytest_terminal_summary(terminalreporter): if not benchmarksession._benchmarks: return + write_json(terminalreporter) + timer = benchmarksession._options.get('timer') groups = defaultdict(list) @@ -413,6 +426,58 @@ def pytest_terminal_summary(terminalreporter): tr.write_line("") +def write_json(terminalreporter): + tr = terminalreporter + benchmarksession = tr.config._benchmarksession + + if not benchmarksession._benchmarks: + return + if not tr.config.option.benchmark_json_path: + return + + jsonData = {} + + jsonData['header'] = {} + + jsonData['header']['hostname'] = socket.gethostname() + + tr.config.hook.pytest_benchmark_add_extra_info(headerDict=jsonData['header']) + + + groups = defaultdict(list) + for bench in benchmarksession._benchmarks: + groups[bench.group].append(bench) + + + labels = { + "name": "Name", + "min": "Min", + "max": "Max", + "mean": "Mean", + "stddev": "StdDev", + "runs": "Rounds", + "scale": "Iterations", + } + allBenchmarks = {} + for group, benchmarks in sorted(groups.items(), key=lambda pair: pair[0] or ""): + if group is None: + group = 'default' + groupData = [] + for 
benchmark in benchmarks: + tt = { jsonName: benchmark[prop] for prop, jsonName in labels.items() } + tt['status'] = 'passed' + allBenchmarks[tt['Name']] = tt + groupData.append(tt) + jsonData[group] = groupData + + for status in ('error', 'failed'): + for rep in tr.getreports(status): + allBenchmarks[rep.nodeid]['status'] = status + + with open(tr.config.option.benchmark_json_path,'w') as f: + f.write(json.dumps(jsonData, indent=4)) + + @pytest.fixture(scope="function") def benchmark(request): benchmarksession = request.config._benchmarksession From 9ba2aa6f024563439be5c82ef8becc22af1b435e Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Thu, 16 Apr 2015 13:40:29 +1000 Subject: [PATCH 3/7] Added the date into the json report header --- src/pytest_benchmark/plugin.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pytest_benchmark/plugin.py b/src/pytest_benchmark/plugin.py index 8d4f503..0862194 100644 --- a/src/pytest_benchmark/plugin.py +++ b/src/pytest_benchmark/plugin.py @@ -3,6 +3,7 @@ from collections import defaultdict from decimal import Decimal import argparse +import datetime import gc import json import math @@ -440,6 +441,7 @@ def write_json(terminalreporter): jsonData['header'] = {} jsonData['header']['hostname'] = socket.gethostname() + jsonData['header']['report_datetime'] = datetime.datetime.utcnow().isoformat() tr.config.hook.pytest_benchmark_add_extra_info(headerDict=jsonData['header']) From 146b456c81b461e547d8050bc25f9a10ff85a7f5 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Fri, 17 Apr 2015 11:51:53 +1000 Subject: [PATCH 4/7] Updated documentation for json output --- CHANGELOG.rst | 5 +++++ README.rst | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 604fbd0..4197c01 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,11 @@ Changelog ========= +HEAD (2015-04-16) +----------------- + +* Added json reporting + 2.4.1 (2015-03-16) ------------------ diff --git a/README.rst b/README.rst 
index bcf07ce..cfb8bd2 100644 --- a/README.rst +++ b/README.rst @@ -125,6 +125,10 @@ If you need to do some wrapping (like special setup), you can use it as a decora Disable GC during benchmarks. --benchmark-skip Skip running any benchmarks. --benchmark-only Only run benchmarks. + --benchmark-name-length={short,full} + length of name in report + --benchmark-json-path=path + create json report file at given path. Setting per-test options: From 0ecbf98b6df1fe8e340766a6429341966ff559b6 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Mon, 20 Apr 2015 15:59:31 +1000 Subject: [PATCH 5/7] Fixed the tests --- tests/test_help.t | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_help.t b/tests/test_help.t index 9174710..dc0a53a 100644 --- a/tests/test_help.t +++ b/tests/test_help.t @@ -20,4 +20,7 @@ Disable GC during benchmarks. --benchmark-skip Skip running any benchmarks. --benchmark-only Only run benchmarks. - \s* (re) + --benchmark-name-length={short,full} + length of name in report + --benchmark-json-path=path + create json report file at given path. From 886779ffb75811e9ad554202dbbd5398fa464f64 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Tue, 21 Apr 2015 08:56:29 +1000 Subject: [PATCH 6/7] Renamed hooks file Corrected py3 behaviour --- src/pytest_benchmark/benchmark_hooks.py | 13 +++++++++++++ src/pytest_benchmark/newhooks.py | 3 --- src/pytest_benchmark/plugin.py | 5 +++-- 3 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 src/pytest_benchmark/benchmark_hooks.py delete mode 100644 src/pytest_benchmark/newhooks.py diff --git a/src/pytest_benchmark/benchmark_hooks.py b/src/pytest_benchmark/benchmark_hooks.py new file mode 100644 index 0000000..df95e9a --- /dev/null +++ b/src/pytest_benchmark/benchmark_hooks.py @@ -0,0 +1,13 @@ + +def pytest_benchmark_add_extra_info(headerDict): + """ called during json report preparation. 
+ + + Extra information can be added to the report header + + headerDict['user'] = getpass.getuser() + + head_sha = subprocess.check_output('git rev-parse HEAD', shell=True) + + headerDict['revision'] = head_sha.strip() + """ + pass diff --git a/src/pytest_benchmark/newhooks.py b/src/pytest_benchmark/newhooks.py deleted file mode 100644 index ab1317f..0000000 --- a/src/pytest_benchmark/newhooks.py +++ /dev/null @@ -1,3 +0,0 @@ - -def pytest_benchmark_add_extra_info(headerDict): - pass diff --git a/src/pytest_benchmark/plugin.py b/src/pytest_benchmark/plugin.py index 0862194..cf810cf 100644 --- a/src/pytest_benchmark/plugin.py +++ b/src/pytest_benchmark/plugin.py @@ -18,7 +18,7 @@ from .timers import compute_timer_precision from .timers import default_timer -import newhooks +from . import benchmark_hooks class NameWrapper(object): @@ -149,7 +149,8 @@ def pytest_addoption(parser): def pytest_addhooks(pluginmanager): - pluginmanager.addhooks(newhooks) + ''' install hooks so users can add extra info to the json report header''' + pluginmanager.addhooks(benchmark_hooks) class BenchmarkStats(RunningStats): From f1e263a2726dc096b9a160178bbcbd9ead2a3eb7 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Tue, 21 Apr 2015 09:18:10 +1000 Subject: [PATCH 7/7] Update documentation for json report --- README.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.rst b/README.rst index cfb8bd2..6b7b32a 100644 --- a/README.rst +++ b/README.rst @@ -206,6 +206,28 @@ pytest-benchmark[aspect]`): f = Foo() f.run() + +JSON report +=========== + +pytest-benchmark can produce a report of activity as a JSON file. +Just specify a location for the report using --benchmark-json-path. It's also recommended that you +set --benchmark-name-length=full if you have a large test suite, this will guarantee unique names +for all the tests + +You can add extra information to the header of the report by adding the +pytest_benchmark_add_extra_info hook to your conftest.py. 
+ +.. sourcecode:: python + + def pytest_benchmark_add_extra_info(headerDict): + headerDict['user'] = getpass.getuser() + + head_sha = subprocess.check_output('git rev-parse HEAD', shell=True) + + headerDict['revision'] = head_sha.strip() + + Documentation =============