From: Douglas Bagnall
Date: Tue, 16 Aug 2016 22:56:50 +0000 (+1200)
Subject: make perftest: for performance testing
X-Git-Tag: tevent-0.9.31~604
X-Git-Url: http://git.samba.org/samba.git/?p=nivanova%2Fsamba-autobuild%2F.git;a=commitdiff_plain;h=e908873757171db5b65296c5c3cdefe7d0fb0c01

make perftest: for performance testing

This runs a selection of subunit tests and reduces the output to only
the time it takes to run each test. The tests are listed in
selftest/perf_tests.py.

Signed-off-by: Douglas Bagnall
Reviewed-by: Garming Sam
---

diff --git a/Makefile b/Makefile
index 95681ae9de7..5cc90771000 100644
--- a/Makefile
+++ b/Makefile
@@ -16,6 +16,9 @@ uninstall:
 test:
 	$(WAF) test $(TEST_OPTIONS)
 
+perftest:
+	$(WAF) test --perf-test $(TEST_OPTIONS)
+
 help:
 	@echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH
 	$(WAF) --help
diff --git a/selftest/filter-subunit b/selftest/filter-subunit
index 857b8420ead..c3aba734ade 100755
--- a/selftest/filter-subunit
+++ b/selftest/filter-subunit
@@ -44,6 +44,8 @@ parser.add_option("--fail-on-empty", default=False,
     action="store_true", help="Fail if there was no subunit output")
 parser.add_option("--list", default=False,
     action="store_true", help="Operate in list mode")
+parser.add_option("--perf-test-output", default=False,
+    action="store_true", help="orientate output for performance measurement")
 opts, args = parser.parse_args()
 
 if opts.list:
@@ -51,6 +53,18 @@ if opts.list:
         sys.stdout.write("%s%s%s\n" % (opts.prefix, l.rstrip(), opts.suffix))
     sys.exit(0)
 
+if opts.perf_test_output:
+    bad_options = []
+    for bad_opt in ('fail_immediately', 'strip_passed_output',
+                    'flapping', 'expected_failures'):
+        if getattr(opts, bad_opt):
+            bad_options.append(bad_opt)
+    if bad_options:
+        print >>sys.stderr, ("--perf-test-output is incompatible with --%s" %
+                             (', --'.join(x.replace('_', '-')
+                                          for x in bad_options)))
+        sys.exit(1)
+
 if opts.expected_failures:
     expected_failures = subunithelper.read_test_regexes(opts.expected_failures)
 else:
@@ -76,10 +90,15 @@ def handle_sigint(sig, stack):
 signal.signal(signal.SIGINT, handle_sigint)
 
 out = subunithelper.SubunitOps(sys.stdout)
-msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix, expected_failures,
-                                  opts.strip_passed_output,
-                                  fail_immediately=opts.fail_immediately,
-                                  flapping=flapping)
+
+if opts.perf_test_output:
+    msg_ops = subunithelper.PerfFilterOps(out, opts.prefix, opts.suffix)
+else:
+    msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix,
+                                      expected_failures,
+                                      opts.strip_passed_output,
+                                      fail_immediately=opts.fail_immediately,
+                                      flapping=flapping)
 
 try:
     ret = subunithelper.parse_results(msg_ops, statistics, sys.stdin)
diff --git a/selftest/perf_tests.py b/selftest/perf_tests.py
new file mode 100644
index 00000000000..d49bdf4c437
--- /dev/null
+++ b/selftest/perf_tests.py
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+
+# This script generates a list of testsuites that should be run to
+# test Samba performance.
+#
+# These tests are not intended to exercise aspects of Samba, but to
+# perform common simple functions or to ascertain performance.
+#
+
+# The syntax for a testsuite is "-- TEST --" on a single line, followed
+# by the name of the test, the environment it needs and the command to run, all
+# three separated by newlines. All other lines in the output are considered
+# comments.
+
+from selftesthelpers import *
+
+samba4srcdir = source4dir()
+samba4bindir = bindir()
+
+plantestsuite_loadlist("samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs)",
+                       "ad_dc_ntvfs",
+                       [python, os.path.join(samba4srcdir,
+                                             "dsdb/tests/python/ad_dc_performance.py"),
+                        '$SERVER', '-U"$USERNAME%$PASSWORD"',
+                        '--workgroup=$DOMAIN',
+                        '$LOADLIST', '$LISTOPT'])
diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py
index a3bb30b69bc..c17036defba 100644
--- a/selftest/subunithelper.py
+++ b/selftest/subunithelper.py
@@ -17,6 +17,7 @@
 
 __all__ = ['parse_results']
 
+import datetime
 import re
 import sys
 from samba import subunit
@@ -429,6 +430,73 @@ class FilterOps(unittest.TestResult):
         self.fail_immediately = fail_immediately
 
 
+class PerfFilterOps(unittest.TestResult):
+
+    def progress(self, delta, whence):
+        pass
+
+    def output_msg(self, msg):
+        pass
+
+    def control_msg(self, msg):
+        pass
+
+    def skip_testsuite(self, name, reason=None):
+        self._ops.skip_testsuite(name, reason)
+
+    def start_testsuite(self, name):
+        self.suite_has_time = False
+
+    def end_testsuite(self, name, result, reason=None):
+        pass
+
+    def _add_prefix(self, test):
+        return subunit.RemotedTestCase(self.prefix + test.id() + self.suffix)
+
+    def time(self, time):
+        self.latest_time = time
+        #self._ops.output_msg("found time %s\n" % time)
+        self.suite_has_time = True
+
+    def get_time(self):
+        if self.suite_has_time:
+            return self.latest_time
+        return datetime.datetime.utcnow()
+
+    def startTest(self, test):
+        self.seen_output = True
+        test = self._add_prefix(test)
+        self.starts[test.id()] = self.get_time()
+
+    def addSuccess(self, test):
+        test = self._add_prefix(test)
+        tid = test.id()
+        if tid not in self.starts:
+            self._ops.addError(test, "%s succeeded without ever starting!"
+                               % tid)
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("elapsed-time: %s: %f\n" % (tid, delta.total_seconds()))
+
+    def addFailure(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("failure: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def addError(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("error: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def __init__(self, out, prefix='', suffix=''):
+        self._ops = out
+        self.prefix = prefix or ''
+        self.suffix = suffix or ''
+        self.starts = {}
+        self.seen_output = False
+        self.suite_has_time = False
+
+
 class PlainFormatter(TestsuiteEnabledTestResult):
 
     def __init__(self, verbose, immediate, statistics,
diff --git a/selftest/wscript b/selftest/wscript
index 61ca0bd767c..5fa0dac457e 100644
--- a/selftest/wscript
+++ b/selftest/wscript
@@ -79,6 +79,8 @@ def set_options(opt):
                   action="store_true", dest='SOCKET_WRAPPER_KEEP_PCAP', default=False)
     gr.add_option('--random-order', dest='RANDOM_ORDER', default=False,
                   action="store_true", help="Run testsuites in random order")
+    gr.add_option('--perf-test', dest='PERF_TEST', default=False,
+                  action="store_true", help="run performance tests only")
 
 def configure(conf):
     conf.env.SELFTEST_PREFIX = Options.options.SELFTEST_PREFIX
@@ -145,7 +147,10 @@ def cmd_testonly(opt):
         env.OPTIONS += ' --socket-wrapper-keep-pcap'
     if Options.options.RANDOM_ORDER:
         env.OPTIONS += ' --random-order'
-    if os.environ.get('RUN_FROM_BUILD_FARM') is not None:
+    if Options.options.PERF_TEST:
+        env.FILTER_OPTIONS = ('${PYTHON} -u ${srcdir}/selftest/filter-subunit '
+                              '--perf-test-output')
+    elif os.environ.get('RUN_FROM_BUILD_FARM') is not None:
         env.FILTER_OPTIONS = '${FILTER_XFAIL} --strip-passed-output'
     else:
         env.FILTER_OPTIONS = '${FILTER_XFAIL}'
@@ -193,9 +198,12 @@ def cmd_testonly(opt):
     if not os.path.isdir(env.SELFTEST_PREFIX):
         os.makedirs(env.SELFTEST_PREFIX, int('755', 8))
 
-    env.TESTLISTS = ('--testlist="${PYTHON} ${srcdir}/selftest/tests.py|" ' +
-                     '--testlist="${PYTHON} ${srcdir}/source3/selftest/tests.py|" ' +
-                     '--testlist="${PYTHON} ${srcdir}/source4/selftest/tests.py|"')
+    if Options.options.PERF_TEST:
+        env.TESTLISTS = '--testlist="${PYTHON} ${srcdir}/selftest/perf_tests.py|" '
+    else:
+        env.TESTLISTS = ('--testlist="${PYTHON} ${srcdir}/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source3/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source4/selftest/tests.py|"')
 
     if CONFIG_SET(opt, 'AD_DC_BUILD_IS_ENABLED'):
         env.SELFTEST_TARGET = "samba"
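
Note on the output this produces: for each passing test, PerfFilterOps emits a single
"elapsed-time: <test id>: <seconds>" line, while failing or erroring tests are reported
with "failure:" and "error:" prefixes instead. The snippet below is a minimal,
illustrative sketch (not part of the patch above) of how those elapsed-time lines could
be collected and ranked after a run; the script name summarise_perf.py and the idea of
piping the perftest output into it are assumptions made only for this example.

#!/usr/bin/python
# summarise_perf.py (hypothetical): rank perftest results by duration.
# Assumes stdin carries the filtered subunit stream produced with
# --perf-test-output, i.e. lines like "elapsed-time: <test id>: <seconds>".
import re
import sys

# Matches the format string that PerfFilterOps.addSuccess() uses above.
ELAPSED = re.compile(r'^elapsed-time: (?P<test>.+): (?P<seconds>[0-9.]+)\s*$')

def collect_times(stream):
    """Return a dict mapping test id to elapsed seconds."""
    times = {}
    for line in stream:
        m = ELAPSED.match(line)
        if m:
            times[m.group('test')] = float(m.group('seconds'))
    return times

if __name__ == '__main__':
    times = collect_times(sys.stdin)
    # Print the slowest tests first so regressions stand out.
    for test, seconds in sorted(times.items(), key=lambda kv: -kv[1]):
        print("%10.3f  %s" % (seconds, test))

Failure and error lines do not match the pattern and are silently skipped here; a real
tool would probably want to surface them as well.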