From 0ce2cdda8bb4c35444ee91f1e0c82f8ef700682b Mon Sep 17 00:00:00 2001
From: Peter Wu
Date: Fri, 6 Jul 2018 17:34:01 +0200
Subject: [PATCH] Tests: add minimal pytest support

pytest is a powerful test framework. The initial motivation is to have
much more detailed reporting when a test fails, see for example
https://docs.pytest.org/en/latest/

Additionally, it has full parallelization support at the test level
(via the pytest-xdist plugin) instead of being limited to the suite
level (which currently has to be hard-coded via CMakeLists.txt).

Usage with the build dir in /tmp/wsbuild and the source dir in
/tmp/wireshark:

    export WS_BIN_PATH=/tmp/wsbuild/run
    pytest /tmp/wireshark/test

For parallelization support and verbose printing:

    pip install pytest-xdist
    pytest -nauto -v /tmp/wireshark/test

To limit yourself to cases matching a pattern:

    pytest -nauto -v /tmp/wireshark/test -k test_unit_ctest_coverage

Tested on Arch Linux with Python 3.6.5, pytest-3.6.2, xdist-1.22.2.
pytest -n8 finished in 82 seconds while ctest -j8 required 87 seconds.

Change-Id: I832f4dd9f988d6656df795327e81610accf54b9f
Reviewed-on: https://code.wireshark.org/review/28651
Reviewed-by: Gerald Combs
Reviewed-by: Anders Broman
---
 test/conftest.py       | 34 ++++++++++++++++++++++++++++++++++
 test/pytest.ini        |  2 ++
 test/subprocesstest.py | 17 +++++++++++++----
 3 files changed, 49 insertions(+), 4 deletions(-)
 create mode 100644 test/conftest.py
 create mode 100644 test/pytest.ini

diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 0000000000..eff63dadaa
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,34 @@
+#
+# -*- coding: utf-8 -*-
+# Wireshark tests
+#
+# Copyright (c) 2018 Peter Wu
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''py.test configuration'''
+
+import os
+import sys
+import config
+
+
+# XXX remove globals in config and create py.test-specific fixtures
+try:
+    _program_path = os.environ['WS_BIN_PATH']
+except KeyError:
+    print('Please set env var WS_BIN_PATH to the run directory with binaries')
+    sys.exit(1)
+if not config.setProgramPath(_program_path):
+    print('One or more required executables not found at {}\n'.format(_program_path))
+    sys.exit(1)
+
+# this is set only to please case_unittests.test_unit_ctest_coverage
+def pytest_collection_modifyitems(items):
+    '''Find all test groups.'''
+    suites = []
+    for item in items:
+        name = item.nodeid.split("::")[0].replace(".py", "").replace("/", ".")
+        if name not in suites:
+            suites.append(name)
+    config.all_groups = list(sorted(suites))
diff --git a/test/pytest.ini b/test/pytest.ini
new file mode 100644
index 0000000000..129efd7545
--- /dev/null
+++ b/test/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+python_files=suite_*.py group_*.py
diff --git a/test/subprocesstest.py b/test/subprocesstest.py
index 9815f230e8..2f05137bbf 100644
--- a/test/subprocesstest.py
+++ b/test/subprocesstest.py
@@ -146,6 +146,17 @@ class SubprocessTestCase(unittest.TestCase):
             except:
                 pass
 
+    def _error_count(self, result):
+        if not result:
+            return 0
+        if hasattr(result, 'failures'):
+            # Python standard unittest runner
+            return len(result.failures) + len(result.errors)
+        if hasattr(result, '_excinfo'):
+            # pytest test runner
+            return len(result._excinfo or [])
+        self.fail("Unexpected test result %r" % result)
+
     def run(self, result=None):
         # Subclass run() so that we can do the following:
         # - Open our log file and add it to the cleanup list.
@@ -162,9 +173,7 @@ class SubprocessTestCase(unittest.TestCase):
         # to handle line endings in the future.
         self.log_fd = io.open(self.log_fname, 'w', encoding='UTF-8', newline='\n')
         self.cleanup_files.append(self.log_fname)
-        pre_run_problem_count = 0
-        if result:
-            pre_run_problem_count = len(result.failures) + len(result.errors)
+        pre_run_problem_count = self._error_count(result)
         try:
             super(SubprocessTestCase, self).run(result=result)
         except KeyboardInterrupt:
@@ -176,7 +185,7 @@ class SubprocessTestCase(unittest.TestCase):
         self.kill_processes()
         self.log_fd.close()
         if result:
-            post_run_problem_count = len(result.failures) + len(result.errors)
+            post_run_problem_count = self._error_count(result)
             if pre_run_problem_count != post_run_problem_count:
                 self.dump_files.append(self.log_fname)
                 # Leave some evidence behind.
-- 
2.34.1
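
For illustration, a minimal standalone sketch of the suite-name mapping that
pytest_collection_modifyitems() in conftest.py performs when it populates
config.all_groups. Only the nodeid string handling is taken from the patch;
the helper name suites_from_nodeids() and the example node ids below are
hypothetical.

    def suites_from_nodeids(nodeids):
        '''Map node ids like "suite_x.py::case_y::test_z" to suite names.'''
        suites = []
        for nodeid in nodeids:
            # Same transformation as in pytest_collection_modifyitems():
            # keep the file part, drop ".py", turn path separators into dots.
            name = nodeid.split("::")[0].replace(".py", "").replace("/", ".")
            if name not in suites:
                suites.append(name)
        return sorted(suites)

    # Hypothetical node ids; real ones come from the collected suite_*.py files.
    example = [
        'suite_unittests.py::case_unittests::test_unit_ctest_coverage',
        'suite_capture.py::case_capture::test_capture_10_packets',
        'suite_capture.py::case_capture::test_capture_promisc',
    ]
    print(suites_from_nodeids(example))   # ['suite_capture', 'suite_unittests']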