make perftest: for performance testing
author	Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Tue, 16 Aug 2016 22:56:50 +0000 (10:56 +1200)
committer	Douglas Bagnall <dbagnall@samba.org>
Wed, 31 Aug 2016 05:09:26 +0000 (07:09 +0200)
This runs a selection of subunit tests and reduces the output to only
the time it takes to run each test.

The tests are listed in selftest/perf_tests.py.
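
Typical usage (the test id in the sample output below is illustrative;
the line format is the one emitted by PerfFilterOps.addSuccess in this
patch):

    make perftest

Each passing test is then reported as a single timing line, roughly:

    elapsed-time: samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs).some_test: 12.345678

Failures and errors are reported as "failure: ..." and "error: ..."
lines that include the elapsed seconds.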

Signed-off-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Reviewed-by: Garming Sam <garming@catalyst.net.nz>
Makefile
selftest/filter-subunit
selftest/perf_tests.py [new file with mode: 0644]
selftest/subunithelper.py
selftest/wscript

index 95681ae9de79af20bf25f47125975a07ac9ff1a0..5cc907710001b47fd459c5edbf6a56647d5d7486 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -16,6 +16,9 @@ uninstall:
 test:
        $(WAF) test $(TEST_OPTIONS)
 
+perftest:
+       $(WAF) test --perf-test $(TEST_OPTIONS)
+
 help:
        @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH
        $(WAF) --help
index 857b8420eade5ad62c08938d06f316eab6f42806..c3aba734ade2f9fd625b02d36468cf54680baded 100755 (executable)
--- a/selftest/filter-subunit
+++ b/selftest/filter-subunit
@@ -44,6 +44,8 @@ parser.add_option("--fail-on-empty", default=False,
     action="store_true", help="Fail if there was no subunit output")
 parser.add_option("--list", default=False,
     action="store_true", help="Operate in list mode")
+parser.add_option("--perf-test-output", default=False,
+    action="store_true", help="orientate output for performance measurement")
 opts, args = parser.parse_args()
 
 if opts.list:
@@ -51,6 +53,18 @@ if opts.list:
          sys.stdout.write("%s%s%s\n" % (opts.prefix, l.rstrip(), opts.suffix))
     sys.exit(0)
 
+if opts.perf_test_output:
+    bad_options = []
+    for bad_opt in ('fail_immediately', 'strip_passed_output',
+                    'flapping', 'expected_failures'):
+        if getattr(opts, bad_opt):
+            bad_options.append(bad_opt)
+    if bad_options:
+        print >>sys.stderr, ("--perf-test-output is incompatible with --%s" %
+                             (', --'.join(x.replace('_', '-')
+                                          for x in bad_options)))
+        sys.exit(1)
+
 if opts.expected_failures:
     expected_failures = subunithelper.read_test_regexes(opts.expected_failures)
 else:
@@ -76,10 +90,15 @@ def handle_sigint(sig, stack):
 signal.signal(signal.SIGINT, handle_sigint)
 
 out = subunithelper.SubunitOps(sys.stdout)
-msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix, expected_failures,
-    opts.strip_passed_output,
-    fail_immediately=opts.fail_immediately,
-    flapping=flapping)
+
+if opts.perf_test_output:
+    msg_ops = subunithelper.PerfFilterOps(out, opts.prefix, opts.suffix)
+else:
+    msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix,
+                                      expected_failures,
+                                      opts.strip_passed_output,
+                                      fail_immediately=opts.fail_immediately,
+                                      flapping=flapping)
 
 try:
     ret = subunithelper.parse_results(msg_ops, statistics, sys.stdin)
diff --git a/selftest/perf_tests.py b/selftest/perf_tests.py
new file mode 100644 (file)
index 0000000..d49bdf4
--- /dev/null
+++ b/selftest/perf_tests.py
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+
+# This script generates a list of testsuites that should be run to
+# test Samba performance.
+#
+# These tests are not intended to exercise every aspect of Samba, but
+# to perform common simple functions in order to ascertain performance.
+#
+
+# The syntax for a testsuite is "-- TEST --" on a single line, followed
+# by the name of the test, the environment it needs and the command to run, all
+# three separated by newlines. All other lines in the output are considered
+# comments.
+
+from selftesthelpers import *
+
+samba4srcdir = source4dir()
+samba4bindir = bindir()
+
+plantestsuite_loadlist("samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs)",
+                       "ad_dc_ntvfs",
+                       [python, os.path.join(samba4srcdir,
+                                             "dsdb/tests/python/ad_dc_performance.py"),
+                        '$SERVER', '-U"$USERNAME%$PASSWORD"',
+                        '--workgroup=$DOMAIN',
+                        '$LOADLIST', '$LISTOPT'])
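
(Example, not part of the patch: additional performance suites can be
planned from this file with further plantestsuite_loadlist() calls of
the same shape.  The suite name and script path below are hypothetical
placeholders; the testsuite-list syntax they feed is the one described
in the file's header comment above.)

    # Hypothetical extra suite -- the name and script path are examples only.
    plantestsuite_loadlist("samba4.ldap.search_performance.python(ad_dc_ntvfs)",
                           "ad_dc_ntvfs",
                           [python, os.path.join(samba4srcdir,
                                                 "dsdb/tests/python/search_performance.py"),
                            '$SERVER', '-U"$USERNAME%$PASSWORD"',
                            '--workgroup=$DOMAIN',
                            '$LOADLIST', '$LISTOPT'])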
index a3bb30b69bcdd536bf6b9019cc080e0de1466662..c17036defbab6a24925d753bfeae7485257da4b9 100644 (file)
--- a/selftest/subunithelper.py
+++ b/selftest/subunithelper.py
@@ -17,6 +17,7 @@
 
 __all__ = ['parse_results']
 
+import datetime
 import re
 import sys
 from samba import subunit
@@ -429,6 +430,73 @@ class FilterOps(unittest.TestResult):
         self.fail_immediately = fail_immediately
 
 
+class PerfFilterOps(unittest.TestResult):
+
+    def progress(self, delta, whence):
+        pass
+
+    def output_msg(self, msg):
+        pass
+
+    def control_msg(self, msg):
+        pass
+
+    def skip_testsuite(self, name, reason=None):
+        self._ops.skip_testsuite(name, reason)
+
+    def start_testsuite(self, name):
+        self.suite_has_time = False
+
+    def end_testsuite(self, name, result, reason=None):
+        pass
+
+    def _add_prefix(self, test):
+        return subunit.RemotedTestCase(self.prefix + test.id() + self.suffix)
+
+    def time(self, time):
+        self.latest_time = time
+        #self._ops.output_msg("found time %s\n" % time)
+        self.suite_has_time = True
+
+    def get_time(self):
+        if self.suite_has_time:
+            return self.latest_time
+        return datetime.datetime.utcnow()
+
+    def startTest(self, test):
+        self.seen_output = True
+        test = self._add_prefix(test)
+        self.starts[test.id()] = self.get_time()
+
+    def addSuccess(self, test):
+        test = self._add_prefix(test)
+        tid = test.id()
+        if tid not in self.starts:
+            self._ops.addError(test, "%s succeeded without ever starting!" % tid)
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("elapsed-time: %s: %f\n" % (tid, delta.total_seconds()))
+
+    def addFailure(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("failure: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def addError(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("error: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def __init__(self, out, prefix='', suffix=''):
+        self._ops = out
+        self.prefix = prefix or ''
+        self.suffix = suffix or ''
+        self.starts = {}
+        self.seen_output = False
+        self.suite_has_time = False
+
+
 class PlainFormatter(TestsuiteEnabledTestResult):
 
     def __init__(self, verbose, immediate, statistics,
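
(Sketch, not part of the patch: a minimal illustration of how
PerfFilterOps turns time/startTest/addSuccess events into elapsed-time
lines.  It assumes the samba python modules and the selftest/ directory
are importable; FakeOut and FakeTest are stand-ins for the SubunitOps
object and the real subunit test cases.)

    import sys
    import datetime
    import unittest

    sys.path.insert(0, 'selftest')            # assumed source-tree layout
    from subunithelper import PerfFilterOps   # also needs the samba python modules on sys.path

    class FakeOut(object):
        # Minimal stand-in for SubunitOps: the success path only needs
        # output_msg().
        def output_msg(self, msg):
            sys.stdout.write(msg)

    class FakeTest(unittest.TestCase):
        def runTest(self):
            pass

    ops = PerfFilterOps(FakeOut())
    test = FakeTest()

    # Time events normally arrive from the subunit stream; without them
    # get_time() falls back to datetime.datetime.utcnow().
    ops.time(datetime.datetime(2016, 8, 16, 22, 0, 0))
    ops.startTest(test)
    ops.time(datetime.datetime(2016, 8, 16, 22, 0, 3, 500000))
    ops.addSuccess(test)
    # prints something like: elapsed-time: __main__.FakeTest.runTest: 3.500000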
index 61ca0bd767cf5c6bae20cee316912e7918f8fbee..5fa0dac457e83af3da95b9acf1a1fa791ddcf635 100644 (file)
--- a/selftest/wscript
+++ b/selftest/wscript
@@ -79,6 +79,8 @@ def set_options(opt):
                   action="store_true", dest='SOCKET_WRAPPER_KEEP_PCAP', default=False)
     gr.add_option('--random-order', dest='RANDOM_ORDER', default=False,
                   action="store_true", help="Run testsuites in random order")
+    gr.add_option('--perf-test', dest='PERF_TEST', default=False,
+                  action="store_true", help="run performance tests only")
 
 def configure(conf):
     conf.env.SELFTEST_PREFIX = Options.options.SELFTEST_PREFIX
@@ -145,7 +147,10 @@ def cmd_testonly(opt):
         env.OPTIONS += ' --socket-wrapper-keep-pcap'
     if Options.options.RANDOM_ORDER:
         env.OPTIONS += ' --random-order'
-    if os.environ.get('RUN_FROM_BUILD_FARM') is not None:
+    if Options.options.PERF_TEST:
+        env.FILTER_OPTIONS = ('${PYTHON} -u ${srcdir}/selftest/filter-subunit '
+                              '--perf-test-output')
+    elif os.environ.get('RUN_FROM_BUILD_FARM') is not None:
         env.FILTER_OPTIONS = '${FILTER_XFAIL} --strip-passed-output'
     else:
         env.FILTER_OPTIONS = '${FILTER_XFAIL}'
@@ -193,9 +198,12 @@ def cmd_testonly(opt):
     if not os.path.isdir(env.SELFTEST_PREFIX):
         os.makedirs(env.SELFTEST_PREFIX, int('755', 8))
 
-    env.TESTLISTS = ('--testlist="${PYTHON} ${srcdir}/selftest/tests.py|" ' +
-                     '--testlist="${PYTHON} ${srcdir}/source3/selftest/tests.py|" ' +
-                     '--testlist="${PYTHON} ${srcdir}/source4/selftest/tests.py|"')
+    if Options.options.PERF_TEST:
+        env.TESTLISTS = '--testlist="${PYTHON} ${srcdir}/selftest/perf_tests.py|" '
+    else:
+        env.TESTLISTS = ('--testlist="${PYTHON} ${srcdir}/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source3/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source4/selftest/tests.py|"')
 
     if CONFIG_SET(opt, 'AD_DC_BUILD_IS_ENABLED'):
         env.SELFTEST_TARGET = "samba"