From ee5d36e66dc919a3bd6a8076ec9069143d26007d Mon Sep 17 00:00:00 2001
From: "Daniel T. Lee"
Date: Mon, 22 Oct 2018 01:03:06 +0900
Subject: [PATCH 1/3] test: implement parallel testing with multiprocessing

This commit enables running tests in parallel through a multiprocessing
pool.

Because the tests run concurrently, their results no longer arrive in
order. So instead of printing each result directly, the worker
processes put their results into a shared variable, and the main
process waits for each case and prints the results in the original
order.

The worker count can be passed with '-j 8', just like GNU make does.

ex)
  > export TESTARG='-j 8'; make test;
  > ...
  > ./runtest.py all
  > Start 211 tests with 8 workers
  > Test case                  pg             finstrument-fu
  > ------------------------: O0 O1 O2 O3 Os O0 O1 O2 O3 Os
  > 001 basic               : OK NZ NZ OK NZ NZ NZ OK OK NZ
  > 002 argument            : OK OK OK OK OK OK OK OK OK OK

Signed-off-by: Namhyung Kim
Signed-off-by: Daniel T. Lee
---
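Note: the ordering scheme boils down to the following standalone
pattern. This is a minimal sketch for reference only; run_case and
CASES are illustrative stand-ins, and the actual patch binds the case
name to the callback with functools.partial instead of returning it
from the worker.

import multiprocessing
import time

CASES = ['001_basic', '002_argument', '003_lib']

def run_case(case):
    # stand-in for building and running one test case
    time.sleep(0.1)
    return case, 'OK'

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    results = manager.dict()      # shared result store

    def save_result(res):
        # pool callbacks run in the parent process, in completion order
        case, status = res
        results[case] = status

    pool = multiprocessing.Pool(4)
    for case in CASES:
        pool.apply_async(run_case, args=[case], callback=save_result)

    # print in submission order, waiting for stragglers
    for case in CASES:
        while case not in results:
            time.sleep(0.1)
        print('%-24s: %s' % (case, results[case]))

    pool.close()
    pool.join()

Since the callbacks already run in the parent process, a plain dict
would work here as well; the Manager dict just mirrors the shared
state the patch uses.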
 tests/runtest.py | 113 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 81 insertions(+), 32 deletions(-)

diff --git a/tests/runtest.py b/tests/runtest.py
index a583bc210..4f51d9ca3 100755
--- a/tests/runtest.py
+++ b/tests/runtest.py
@@ -3,6 +3,8 @@
 import os, sys
 import glob, re
 import subprocess as sp
+import multiprocessing
+import time
 
 class TestBase:
     supported_lang = {
@@ -447,6 +449,7 @@ def timeout_handler(sig, frame):
     TestBase.TEST_SUCCESS_FIXED: 'Test succeeded (with some fixup)',
 }
 
+
 def run_single_case(case, flags, opts, arg):
     result = []
 
@@ -469,6 +472,15 @@ def run_single_case(case, flags, opts, arg):
 
     return result
 
+
+def save_test_result(result, case, shared):
+    shared.results[case] = result
+    shared.progress += 1
+    for r in result:
+        shared.stats[r] += 1
+        shared.total += 1
+
+
 def print_test_result(case, result, color):
     if sys.stdout.isatty() and color:
         result_list = [colored_result[r] for r in result]
@@ -480,6 +492,40 @@ def print_test_result(case, result, color):
 
     sys.stdout.write(output)
 
+
+def print_test_header(opts, flags):
+    optslen = len(opts)
+
+    header1 = '%-24s ' % 'Test case'
+    header2 = '-' * 24 + ':'
+    empty = '                      '
+
+    for flag in flags:
+        # align with optimization flags
+        header1 += ' ' + flag[:optslen] + empty[len(flag):optslen]
+        header2 += ' ' + opts
+
+    print(header1)
+    print(header2)
+
+
+def print_test_report(arg, shared):
+    success = shared.stats[TestBase.TEST_SUCCESS] + \
+              shared.stats[TestBase.TEST_SUCCESS_FIXED]
+    percent = 100.0 * success / shared.total
+
+    print("")
+    print("runtime test stats")
+    print("====================")
+    print("total %5d  Tests executed (success: %.2f%%)" % (shared.total, percent))
+    for r in res:
+        if sys.stdout.isatty() and arg.color:
+            result = colored_result[r]
+        else:
+            result = text_result[r]
+        print("  %s: %5d  %s" % (result, shared.stats[r], result_string[r]))
+
+
 def parse_argument():
     import argparse
 
@@ -503,9 +549,12 @@ def parse_argument():
                         help="suppress color in the output")
     parser.add_argument("-t", "--timeout", dest='timeout', default=5,
                         help="fail test if it runs more than TIMEOUT seconds")
+    parser.add_argument("-j", "--worker", dest='worker', type=int, default=multiprocessing.cpu_count(),
+                        help="parallel worker count; uses all cores by default")
 
     return parser.parse_args()
 
+
 if __name__ == "__main__":
     # prevent to create .pyc files (it makes some tests failed)
     os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
@@ -522,12 +571,7 @@ def parse_argument():
         print("cannot find testcase for : %s" % arg.case)
         sys.exit(0)
 
-    opts = ' '.join(sorted(['O'+o for o in arg.opts]))
-    optslen = len(opts)
-
-    header1 = '%-24s ' % 'Test case'
-    header2 = '-' * 24 + ':'
-    empty = '                      '
+    opts = ' '.join(sorted(['O' + o for o in arg.opts]))
 
     if arg.pg_flag:
         flags = ['pg']
@@ -535,15 +579,16 @@ def parse_argument():
         flags = ['finstrument-functions']
     else:
         flags = arg.flags.split()
-    for flag in flags:
-        # align with optimization flags
-        header1 += ' ' + flag[:optslen] + empty[len(flag):optslen]
-        header2 += ' ' + opts
 
-    print(header1)
-    print(header2)
+    from functools import partial
+
+    manager = multiprocessing.Manager()
+    shared = manager.dict()
 
-    total = 0
+    shared.tests_count = len(testcases)
+    shared.progress = 0
+    shared.results = dict()
+    shared.total = 0
     res = []
     res.append(TestBase.TEST_SUCCESS)
     res.append(TestBase.TEST_SUCCESS_FIXED)
@@ -555,26 +600,30 @@ def parse_argument():
     res.append(TestBase.TEST_UNSUPP_LANG)
     res.append(TestBase.TEST_SKIP)
 
-    stats = dict.fromkeys(res, 0)
+    shared.stats = dict.fromkeys(res, 0)
 
+    pool = multiprocessing.Pool(arg.worker)
     for tc in sorted(testcases):
-        name = tc[:-3]  # remove '.py'
-        result = run_single_case(name, flags, opts.split(), arg)
-        print_test_result(name, result, arg.color)
-        for r in result:
-            stats[r] += 1
-            total += 1
+        name = tc.split('.')[0]  # remove '.py'
+        clbk = partial(save_test_result, case=name, shared=shared)
+        pool.apply_async(run_single_case, callback=clbk,
+                         args=[name, flags, opts.split(), arg])
 
-    success = stats[TestBase.TEST_SUCCESS] + stats[TestBase.TEST_SUCCESS_FIXED]
-    percent = 100.0 * success / total
+    print("Start %s tests with %d workers" % (shared.tests_count, arg.worker))
+    print_test_header(opts, flags)
 
-    print("")
-    print("runtime test stats")
-    print("====================")
-    print("total %5d  Tests executed (success: %.2f%%)" % (total, percent))
-    for r in res:
-        if sys.stdout.isatty() and arg.color:
-            result = colored_result[r]
-        else:
-            result = text_result[r]
-        print("  %s: %5d  %s" % (result, stats[r], result_string[r]))
+    for tc in sorted(testcases):
+        name = tc.split('.')[0]  # remove '.py'
+
+        while name not in shared.results:
+            time.sleep(1)
+
+        print_test_result(name, shared.results[name], arg.color)
+
+    pool.close()
+    pool.join()
+
+    sys.stdout.write("\n")
+    sys.stdout.flush()
+
+    print_test_report(arg, shared)

From 02f9fc508671f1bc4a0e0f63333ec7f35cae6c45 Mon Sep 17 00:00:00 2001
From: "Daniel T. Lee"
Date: Mon, 22 Oct 2018 01:22:27 +0900
Subject: [PATCH 2/3] test: fix conflicts between concurrent builds of the
 same test binary

When tests run in parallel, the same test binary can be compiled by
several tests at the same time. For instance, tests 004, 005 and 006
all use `s-abc` as their test binary. The concurrent builds then
compete for the same target, and a binary that another test is still
waiting on can be removed out from under it.

To solve this, this commit creates a temporary directory per test with
tempfile.mkdtemp(). Each test builds in that directory using the
absolute path of its source file, and runs with the absolute uftrace
path.

Signed-off-by: Daniel T. Lee
---
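Note: the isolation scheme is essentially the pattern below. This is a
minimal standalone sketch; IsolatedBuild is an illustrative stand-in
for TestBase, and shutil.rmtree() replaces the patch's 'rm -rf'
subprocess.

import os
import shutil
import subprocess as sp
import tempfile

class IsolatedBuild(object):
    # resolved once at import time, while the process still runs
    # from the tests/ directory
    basedir = os.path.dirname(os.getcwd())

    def __init__(self, name):
        # private scratch directory; concurrent tests never share targets
        self.test_dir = tempfile.mkdtemp(prefix='test_%s_' % name)
        os.chdir(self.test_dir)

    def build(self, build_cmd):
        # rewrite the relative source path ('s-*') to an absolute one,
        # since the build no longer runs inside the tests/ directory
        cmd = build_cmd.split()
        for i, word in enumerate(cmd):
            if word.startswith('s-'):
                cmd[i] = os.path.join(self.basedir, 'tests', word)
        return sp.call(cmd)

    def __del__(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

The os.chdir() is a process-wide side effect, which is tolerable here
because each pool worker builds and runs exactly one test at a time.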
 tests/runtest.py            | 20 ++++++++++++++++++--
 tests/t157_script_python.py |  2 +-
 tests/t199_script_info.py   |  2 +-
 tests/t207_dump_graphviz.py |  7 ++-----
 4 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/tests/runtest.py b/tests/runtest.py
index 4f51d9ca3..9da103d46 100755
--- a/tests/runtest.py
+++ b/tests/runtest.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import os, sys
+import tempfile
 import glob, re
 import subprocess as sp
 import multiprocessing
@@ -22,13 +23,17 @@ class TestBase:
     TEST_SKIP = -7
     TEST_SUCCESS_FIXED = -8
 
-    objdir = 'objdir' in os.environ and os.environ['objdir'] or '..'
+    basedir = os.path.dirname(os.getcwd())
+    objdir = 'objdir' in os.environ and os.environ['objdir'] or basedir
     uftrace_cmd = objdir + '/uftrace --no-pager --no-event -L' + objdir
 
     default_cflags = ['-fno-inline', '-fno-builtin', '-fno-ipa-cp',
                       '-fno-omit-frame-pointer', '-D_FORTIFY_SOURCE=0']
 
     def __init__(self, name, result, lang='C', cflags='', ldflags='', sort='task'):
+        _tmp = tempfile.mkdtemp(prefix='test_%s_' % name)
+        os.chdir(_tmp)
+        self.test_dir = _tmp
         self.name = name
         self.result = result
         self.cflags = cflags
@@ -43,7 +48,16 @@ def pr_debug(self, msg):
         if self.debug:
             print(msg)
 
+    def convert_abs_path(self, build_cmd):
+        cmd = build_cmd.split()
+        src_idx = [i for i, _cmd in enumerate(cmd) if _cmd.startswith('s-')][0]
+        abs_src = os.path.join(self.basedir, 'tests', cmd[src_idx])
+        cmd[src_idx] = abs_src
+        return " ".join(cmd)
+
     def build_it(self, build_cmd):
+        build_cmd = self.convert_abs_path(build_cmd)
+
         try:
             p = sp.Popen(build_cmd.split(), stderr=sp.PIPE)
             if p.wait() != 0:
@@ -318,7 +332,7 @@ def fixup(self, cflags, result):
 
     def check_dependency(self, item):
         import os.path
-        return os.path.exists('../check-deps/' + item)
+        return os.path.exists('%s/check-deps/' % self.basedir + item)
 
     def check_perf_paranoid(self):
         try:
@@ -407,6 +421,8 @@ def timeout_handler(sig, frame):
 
         return ret
 
+    def __del__(self):
+        sp.call(['rm', '-rf', self.test_dir])
 
 RED = '\033[1;31m'
 GREEN = '\033[1;32m'
diff --git a/tests/t157_script_python.py b/tests/t157_script_python.py
index 45e8a6f7b..65460a64b 100644
--- a/tests/t157_script_python.py
+++ b/tests/t157_script_python.py
@@ -21,7 +21,7 @@ def pre(self):
 
     def runcmd(self):
         uftrace = TestBase.uftrace_cmd
-        options = '-F main -S ../scripts/count.py'
+        options = '-F main -S %s/scripts/count.py' % self.basedir
         return '%s script -d %s %s' % (uftrace, TDIR, options)
 
     def sort(self, output):
diff --git a/tests/t199_script_info.py b/tests/t199_script_info.py
index 68f18e6ae..86f9639eb 100644
--- a/tests/t199_script_info.py
+++ b/tests/t199_script_info.py
@@ -25,7 +25,7 @@ def pre(self):
 
     def runcmd(self):
         uftrace = TestBase.uftrace_cmd
-        options = '-F main -S ../scripts/info.py foo bar'
+        options = '-F main -S %s/scripts/info.py foo bar' % self.basedir
         return '%s script -d %s %s' % (uftrace, TDIR, options)
 
     def sort(self, output):
diff --git a/tests/t207_dump_graphviz.py b/tests/t207_dump_graphviz.py
index ffea3bcaa..166601683 100644
--- a/tests/t207_dump_graphviz.py
+++ b/tests/t207_dump_graphviz.py
@@ -8,11 +8,7 @@
 
 class TestCase(TestBase):
     def __init__(self):
-        currentPath = os.path.dirname(os.path.abspath(__file__))
-        testPath = os.path.join(currentPath, "t-abc")
-        TestBase.__init__(self, 'abc',
-            "digraph \"" + testPath + "\" {" +
-            """
+        TestBase.__init__(self, 'abc', """digraph "%s" {
 # Attributes
 splines=ortho;
 
@@ -27,6 +23,7 @@ def __init__(self):
 "b" -> "c" [xlabel = "Calls : 1"]
 }
 """)
+        self.result = self.result % os.path.join(self.test_dir, "t-abc")
 
     def pre(self):
         record_cmd = '%s record -d %s %s' % (TestBase.uftrace_cmd, TDIR, 't-' + self.name)

From dd4bf9385ba9e2f4b28d53caa9352c391c883c7c Mon Sep 17 00:00:00 2001
From: "Daniel T. Lee"
Date: Mon, 22 Oct 2018 01:39:07 +0900
Subject: [PATCH 3/3] test: fix default socket bind error in parallel tests

Tests that use sockets, such as the recv tests, use port 8090 by
default. During a parallel run, contending for the same port leads to
failures.

To solve this, each test case now generates a random port at
initialization and sticks to that port, so the sockets no longer
conflict.

Signed-off-by: Daniel T. Lee
---
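Note: the port isolation reduces to the pattern below. This is a
minimal sketch; RecvTest and its parameters are illustrative
stand-ins, while the --port option and the 40000-50000 range mirror
the patch.

import random
import subprocess as sp

class RecvTest(object):
    def __init__(self):
        # one random high port per test object, reused for every
        # uftrace command this test runs
        self.port = random.randint(40000, 50000)

    def pre(self, uftrace_cmd, tdir, prog):
        # receiver and recorder must agree on the port
        recv_cmd = '%s recv -d %s --port %d' % (uftrace_cmd, tdir, self.port)
        recv_p = sp.Popen(recv_cmd.split())

        record_cmd = '%s record -H localhost --port %d %s' % \
                     (uftrace_cmd, self.port, prog)
        sp.call(record_cmd.split())
        return recv_p

Two tests can still draw the same number by chance, but with a handful
of recv tests spread over 10000 ports the odds stay low. Binding to
port 0 and reading back the kernel-assigned port would close even that
window, at the cost of plumbing the port from the recv process back to
the recorder.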
 tests/runtest.py          | 4 ++++
 tests/t141_recv_basic.py  | 5 +++--
 tests/t142_recv_multi.py  | 7 ++++---
 tests/t143_recv_kernel.py | 5 +++--
 tests/t150_recv_event.py  | 6 ++++--
 tests/t151_recv_runcmd.py | 6 ++++--
 tests/t167_recv_sched.py  | 5 +++--
 7 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/tests/runtest.py b/tests/runtest.py
index 9da103d46..569867da4 100755
--- a/tests/runtest.py
+++ b/tests/runtest.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+import random
 import os, sys
 import tempfile
 import glob, re
@@ -48,6 +49,9 @@ def pr_debug(self, msg):
         if self.debug:
             print(msg)
 
+    def gen_port(self):
+        self.port = random.randint(40000, 50000)
+
     def convert_abs_path(self, build_cmd):
         cmd = build_cmd.split()
         src_idx = [i for i, _cmd in enumerate(cmd) if _cmd.startswith('s-')][0]
diff --git a/tests/t141_recv_basic.py b/tests/t141_recv_basic.py
index f9c7b1e52..ab99aad9d 100644
--- a/tests/t141_recv_basic.py
+++ b/tests/t141_recv_basic.py
@@ -21,14 +21,15 @@ def __init__(self):
    2.405 us [28141] |   } /* a */
    3.005 us [28141] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
 
     def pre(self):
-        recv_cmd = '%s recv -d %s' % (TestBase.uftrace_cmd, TDIR)
+        recv_cmd = '%s recv -d %s --port %s' % (TestBase.uftrace_cmd, TDIR, self.port)
         self.recv_p = sp.Popen(recv_cmd.split())
 
-        record_cmd = '%s record -H %s %s' % (TestBase.uftrace_cmd, 'localhost', 't-abc')
+        record_cmd = '%s record -H %s --port %s %s' % (TestBase.uftrace_cmd, 'localhost', self.port, 't-abc')
         sp.call(record_cmd.split())
         return TestBase.TEST_SUCCESS
 
diff --git a/tests/t142_recv_multi.py b/tests/t142_recv_multi.py
index 664540703..0e60e3cf7 100644
--- a/tests/t142_recv_multi.py
+++ b/tests/t142_recv_multi.py
@@ -21,19 +21,20 @@ def __init__(self):
    2.405 us [28141] |   } /* a */
    3.005 us [28141] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
 
     def pre(self):
-        recv_cmd = '%s recv -d %s' % (TestBase.uftrace_cmd, TDIR)
+        recv_cmd = '%s recv -d %s --port %s' % (TestBase.uftrace_cmd, TDIR, self.port)
         self.recv_p = sp.Popen(recv_cmd.split())
 
         # recorded but not used
-        record_cmd = '%s record -H %s %s' % (TestBase.uftrace_cmd, 'localhost', 't-abc')
+        record_cmd = '%s record -H %s --port %s %s' % (TestBase.uftrace_cmd, 'localhost', self.port, 't-abc')
         sp.call(record_cmd.split())
 
         # use this
-        record_cmd = '%s record -H %s -d %s %s' % (TestBase.uftrace_cmd, 'localhost', TDIR2, 't-abc')
+        record_cmd = '%s record -H %s --port %s -d %s %s' % (TestBase.uftrace_cmd, 'localhost', self.port, TDIR2, 't-abc')
         sp.call(record_cmd.split())
 
         return TestBase.TEST_SUCCESS
diff --git a/tests/t143_recv_kernel.py b/tests/t143_recv_kernel.py
index 7263dd130..413a57c30 100644
--- a/tests/t143_recv_kernel.py
+++ b/tests/t143_recv_kernel.py
@@ -22,6 +22,7 @@ def __init__(self):
   37.325 us [18343] |   } /* fclose */
  128.387 us [18343] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
 
@@ -34,10 +35,10 @@ def pre(self):
         uftrace = TestBase.uftrace_cmd
         program = 't-' + self.name
 
-        recv_cmd = '%s recv -d %s' % (uftrace, TDIR)
+        recv_cmd = '%s recv -d %s --port %s' % (uftrace, TDIR, self.port)
         self.recv_p = sp.Popen(recv_cmd.split())
 
-        argument = '-H %s -k -d %s' % ('localhost', TDIR2)
+        argument = '-H %s -k -d %s --port %s' % ('localhost', TDIR2, self.port)
         argument += ' -N %s@kernel' % '_*do_page_fault'
 
         record_cmd = '%s record %s %s' % (uftrace, argument, program)
diff --git a/tests/t150_recv_event.py b/tests/t150_recv_event.py
index 6192bc529..3e9698fb7 100644
--- a/tests/t150_recv_event.py
+++ b/tests/t150_recv_event.py
@@ -18,17 +18,19 @@ def __init__(self):
    2.896 us [28141] |   } /* foo */
    3.017 us [28141] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
 
     def pre(self):
-        recv_cmd = '%s recv -d %s' % (TestBase.uftrace_cmd, TDIR)
+        recv_cmd = '%s recv -d %s --port %s' % (TestBase.uftrace_cmd, TDIR, self.port)
         self.recv_p = sp.Popen(recv_cmd.split())
 
         server = '-H 127.0.0.1'
+        port = '--port %s' % self.port
         option = '-E uftrace:event'
         prog = 't-' + self.name
-        record_cmd = '%s record %s %s %s' % (TestBase.uftrace_cmd, server, option, prog)
+        record_cmd = '%s record %s %s %s %s' % (TestBase.uftrace_cmd, server, port, option, prog)
         sp.call(record_cmd.split())
         return TestBase.TEST_SUCCESS
 
diff --git a/tests/t151_recv_runcmd.py b/tests/t151_recv_runcmd.py
index 587c08d98..b41e1c210 100644
--- a/tests/t151_recv_runcmd.py
+++ b/tests/t151_recv_runcmd.py
@@ -21,16 +21,18 @@ def __init__(self):
    2.405 us [28141] |   } /* a */
    3.005 us [28141] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
     file_p = None
 
     def pre(self):
         self.file_p = open(TMPF, 'w+')
-        recv_cmd = TestBase.uftrace_cmd.split() + ['recv', '-d', TDIR, '--run-cmd', TestBase.uftrace_cmd + ' replay']
+        recv_cmd = TestBase.uftrace_cmd.split() + \
+            ['recv', '-d', TDIR, '--port', str(self.port), '--run-cmd', TestBase.uftrace_cmd + ' replay']
         self.recv_p = sp.Popen(recv_cmd, stdout=self.file_p, stderr=self.file_p)
 
-        record_cmd = '%s record -H %s %s' % (TestBase.uftrace_cmd, 'localhost', 't-' + self.name)
+        record_cmd = '%s record -H %s --port %s %s' % (TestBase.uftrace_cmd, 'localhost', self.port, 't-' + self.name)
         sp.call(record_cmd.split())
         return TestBase.TEST_SUCCESS
 
diff --git a/tests/t167_recv_sched.py b/tests/t167_recv_sched.py
index 060024c40..560281cab 100644
--- a/tests/t167_recv_sched.py
+++ b/tests/t167_recv_sched.py
@@ -26,6 +26,7 @@ def __init__(self):
    2.120 ms [  395] |   } /* foo */
    2.121 ms [  395] | } /* main */
 """)
+        self.gen_port()
 
     recv_p = None
 
@@ -35,10 +36,10 @@ def pre(self):
         if not TestBase.check_perf_paranoid(self):
             return TestBase.TEST_SKIP
 
-        recv_cmd = '%s recv -d %s' % (TestBase.uftrace_cmd, TDIR)
+        recv_cmd = '%s recv -d %s --port %s' % (TestBase.uftrace_cmd, TDIR, self.port)
         self.recv_p = sp.Popen(recv_cmd.split())
 
-        options = '-H %s -E %s' % ('localhost', 'linux:schedule')
+        options = '-H %s --port %s -E %s' % ('localhost', self.port, 'linux:schedule')
         record_cmd = '%s record %s %s' % (TestBase.uftrace_cmd, options, 't-' + self.name)
         sp.call(record_cmd.split())
         return TestBase.TEST_SUCCESS