Tests: add minimal pytest support
author Peter Wu <peter@lekensteyn.nl>
Fri, 6 Jul 2018 15:34:01 +0000 (17:34 +0200)
committer Anders Broman <a.broman58@gmail.com>
Tue, 17 Jul 2018 09:04:01 +0000 (09:04 +0000)
pytest is a powerful test framework. The initial motivation is to get
much more detailed reporting when a test fails; see for example
https://docs.pytest.org/en/latest/

Additionally, it supports parallelization at the level of individual
tests (via the pytest-xdist plugin) instead of being limited to the
suite level (which currently has to be hard-coded in CMakeLists.txt).

Usage with the build dir in /tmp/wsbuild and src dir in /tmp/wireshark:

    export WS_BIN_PATH=/tmp/wsbuild/run
    pytest /tmp/wireshark/test

For parallelization support and verbose printing:

    pip install pytest-xdist
    pytest -n auto -v /tmp/wireshark/test

To limit the run to tests whose names match a pattern:

    pytest -n auto -v /tmp/wireshark/test -k test_unit_ctest_coverage

Tested on Arch Linux with Python 3.6.5, pytest 3.6.2 and pytest-xdist 1.22.2.
pytest -n8 finished in 82 seconds while ctest -j8 required 87 seconds.

Change-Id: I832f4dd9f988d6656df795327e81610accf54b9f
Reviewed-on: https://code.wireshark.org/review/28651
Reviewed-by: Gerald Combs <gerald@wireshark.org>
Reviewed-by: Anders Broman <a.broman58@gmail.com>
test/conftest.py [new file with mode: 0644]
test/pytest.ini [new file with mode: 0644]
test/subprocesstest.py

diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644 (file)
index 0000000..eff63da
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# -*- coding: utf-8 -*-
+# Wireshark tests
+#
+# Copyright (c) 2018 Peter Wu <peter@lekensteyn.nl>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''py.test configuration'''
+
+import os
+import sys
+import config
+
+
+# XXX remove globals in config and create py.test-specific fixtures
+try:
+    _program_path = os.environ['WS_BIN_PATH']
+except KeyError:
+    print('Please set env var WS_BIN_PATH to the run directory with binaries')
+    sys.exit(1)
+if not config.setProgramPath(_program_path):
+    print('One or more required executables not found at {}\n'.format(_program_path))
+    sys.exit(1)
+
+# this is set only to please case_unittests.test_unit_ctest_coverage
+def pytest_collection_modifyitems(items):
+    '''Find all test groups.'''
+    suites = []
+    for item in items:
+        name = item.nodeid.split("::")[0].replace(".py", "").replace("/", ".")
+        if name not in suites:
+            suites.append(name)
+    config.all_groups = list(sorted(suites))
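
For illustration, a minimal sketch (with a hypothetical nodeid, not one
taken from the commit) of how the hook above turns a collected test into
a group name:

    nodeid = 'suite_capture.py::case_wifi_capture::test_capture_10_packets'
    name = nodeid.split("::")[0].replace(".py", "").replace("/", ".")
    assert name == 'suite_capture'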
diff --git a/test/pytest.ini b/test/pytest.ini
new file mode 100644 (file)
index 0000000..129efd7
--- /dev/null
@@ -0,0 +1,2 @@
+[pytest]
+python_files=suite_*.py group_*.py
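
By default pytest only collects modules named test_*.py; the
python_files override above makes it pick up the existing suite_*.py
(and group_*.py) modules instead. Collection can be verified without
running anything, assuming the paths from the usage example above:

    pytest --collect-only /tmp/wireshark/test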
diff --git a/test/subprocesstest.py b/test/subprocesstest.py
index 9815f230e802a96c8e39874f05f76764058ecbbf..2f05137bbfa0fc3476355f49d1214d6c048ad08a 100644 (file)
@@ -146,6 +146,17 @@ class SubprocessTestCase(unittest.TestCase):
             except:
                 pass
 
+    def _error_count(self, result):
+        if not result:
+            return 0
+        if hasattr(result, 'failures'):
+            # Python standard unittest runner
+            return len(result.failures) + len(result.errors)
+        if hasattr(result, '_excinfo'):
+            # pytest test runner
+            return len(result._excinfo or [])
+        self.fail("Unexpected test result %r" % result)
+
     def run(self, result=None):
         # Subclass run() so that we can do the following:
         # - Open our log file and add it to the cleanup list.
@@ -162,9 +173,7 @@ class SubprocessTestCase(unittest.TestCase):
         # to handle line endings in the future.
         self.log_fd = io.open(self.log_fname, 'w', encoding='UTF-8', newline='\n')
         self.cleanup_files.append(self.log_fname)
-        pre_run_problem_count = 0
-        if result:
-            pre_run_problem_count = len(result.failures) + len(result.errors)
+        pre_run_problem_count = self._error_count(result)
         try:
             super(SubprocessTestCase, self).run(result=result)
         except KeyboardInterrupt:
@@ -176,7 +185,7 @@ class SubprocessTestCase(unittest.TestCase):
         self.kill_processes()
         self.log_fd.close()
         if result:
-            post_run_problem_count = len(result.failures) + len(result.errors)
+            post_run_problem_count = self._error_count(result)
             if pre_run_problem_count != post_run_problem_count:
                 self.dump_files.append(self.log_fname)
                 # Leave some evidence behind.
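
For illustration, a stand-alone sketch (using a hypothetical test case,
not part of the commit) of the pre/post counting pattern that run()
applies above to decide whether to keep the log file as evidence:

    import unittest

    class _FailingCase(unittest.TestCase):
        def test_fails(self):
            self.fail('boom')

    result = unittest.TestResult()
    suite = unittest.TestLoader().loadTestsFromTestCase(_FailingCase)
    pre = len(result.failures) + len(result.errors)   # 0 before the run
    suite.run(result)
    post = len(result.failures) + len(result.errors)  # 1 after the failure
    assert post > pre  # a new problem appeared, so evidence is kept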