selftest: Display unexpected successes and expected failures.
author: Jelmer Vernooij <jelmer@samba.org>
Sun, 4 Dec 2011 00:55:23 +0000 (01:55 +0100)
committer: Jelmer Vernooij <jelmer@samba.org>
Mon, 5 Dec 2011 22:11:04 +0000 (23:11 +0100)
selftest/filter-subunit
selftest/subunithelper.py

index ef3172171e864a1c640f135bc53997567f8362d1..1b88575f802acd7da34cc98a6e06371dc889a03b 100755 (executable)
@@ -1,7 +1,22 @@
 #!/usr/bin/env python
 # Filter a subunit stream
-# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
-# Published under the GNU GPL, v3 or later
+# Copyright (C) 2009-2011 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# NOTE: This script is a hack, meant as a placeholder until we can migrate
+# to upstream subunit's filtering tools.
 
 import optparse
 import os
index c29bdbabf0e389746bcd56b2650fcf90452e71cd..b498878c59aef04648d6ea2c6a9c23e043f032ab 100644 (file)
@@ -22,8 +22,9 @@ import sys
 import subunit
 import subunit.iso8601
 import testtools
+from testtools import content, content_type
 
 
-VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error']
+VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error', 'uxsuccess']
 
 class TestsuiteEnabledTestResult(testtools.testresult.TestResult):
 
@@ -33,7 +34,6 @@ class TestsuiteEnabledTestResult(testtools.testresult.TestResult):
 
 def parse_results(msg_ops, statistics, fh):
     exitcode = 0
-    expected_fail = 0
     open_tests = {}
 
     while fh:
@@ -111,7 +111,17 @@ def parse_results(msg_ops, statistics, fh):
                 else:
                     statistics['TESTS_EXPECTED_FAIL']+=1
                     msg_ops.addExpectedFailure(test, remote_error)
-                    expected_fail+=1
+            elif result in ("uxsuccess", ):
+                try:
+                    test = open_tests.pop(testname)
+                except KeyError:
+                    statistics['TESTS_ERROR']+=1
+                    exitcode = 1
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
+                else:
+                    statistics['TESTS_UNEXPECTED_OK']+=1
+                    msg_ops.addUnexpectedSuccess(test, remote_error)
+                    exitcode = 1
             elif result in ("failure", "fail"):
                 try:
                     test = open_tests.pop(testname)
@@ -285,6 +295,11 @@ class FilterOps(testtools.testresult.TestResult):
         self._ops.addExpectedFailure(test, details)
         self.output = None
 
+    def addUnexpectedSuccess(self, test, details=None):
+        test = self._add_prefix(test)
+        self._ops.addUnexpectedSuccess(test, details)
+        self.output = None
+
     def addFailure(self, test, details=None):
         test = self._add_prefix(test)
         xfail_reason = find_in_list(self.expected_failures, test.id())
@@ -314,10 +329,11 @@ class FilterOps(testtools.testresult.TestResult):
         if xfail_reason is not None:
             self.uxsuccess_added += 1
             self.total_uxsuccess += 1
-            if details is not None:
-                details = subunit.RemoteError(unicode(details[1]) + xfail_reason.decode("utf-8"))
-            else:
-                details = subunit.RemoteError(xfail_reason.decode("utf-8"))
+            if details is None:
+                details = {}
+            details['reason'] = content.Content(
+                content_type.ContentType("text", "plain",
+                    {"charset": "utf8"}), lambda: xfail_reason)
             self._ops.addUnexpectedSuccess(test, details)
             if self.output:
                 self._ops.output_msg(self.output)
@@ -508,10 +524,13 @@ class PlainFormatter(TestsuiteEnabledTestResult):
     def addSkip(self, test, details=None):
         self.end_test(test.id(), "skip", False, details)
 
-    def addExpectedFail(self, test, details=None):
+    def addExpectedFailure(self, test, details=None):
         self.end_test(test.id(), "xfail", False, details)
 
-    def end_test(self, testname, result, unexpected, reason=None):
+    def addUnexpectedSuccess(self, test, details=None):
+        self.end_test(test.id(), "uxsuccess", True, details)
+
+    def end_test(self, testname, result, unexpected, details=None):
         if not unexpected:
             self.test_output[self.name] = ""
             if not self.immediate:
@@ -526,17 +545,18 @@ class PlainFormatter(TestsuiteEnabledTestResult):
             self.test_output[self.name] = ""
 
         self.test_output[self.name] += "UNEXPECTED(%s): %s\n" % (result, testname)
-        if reason is not None:
-            self.test_output[self.name] += "REASON: %s\n" % (unicode(reason[1]).encode("utf-8").strip(),)
+        if details is not None:
+            self.test_output[self.name] += "REASON: %s\n" % (unicode(details[1]).encode("utf-8").strip(),)
 
         if self.immediate and not self.verbose:
-            print self.test_output[self.name]
+            sys.stdout.write(self.test_output[self.name])
             self.test_output[self.name] = ""
 
         if not self.immediate:
             sys.stdout.write({
                'error': 'E',
                'failure': 'F',
+               'uxsuccess': 'U',
                'success': 'S'}.get(result, "?"))
 
     def write_summary(self, path):
@@ -570,14 +590,16 @@ class PlainFormatter(TestsuiteEnabledTestResult):
 
         if (not self.suitesfailed and
             not self.statistics['TESTS_UNEXPECTED_FAIL'] and
+            not self.statistics['TESTS_UNEXPECTED_OK'] and
             not self.statistics['TESTS_ERROR']):
             ok = (self.statistics['TESTS_EXPECTED_OK'] +
                   self.statistics['TESTS_EXPECTED_FAIL'])
             print "\nALL OK (%d tests in %d testsuites)" % (ok, self.suites_ok)
         else:
-            print "\nFAILED (%d failures and %d errors in %d testsuites)" % (
+            print "\nFAILED (%d failures, %d errors and %d unexpected successes in %d testsuites)" % (
                 self.statistics['TESTS_UNEXPECTED_FAIL'],
                 self.statistics['TESTS_ERROR'],
+                self.statistics['TESTS_UNEXPECTED_OK'],
                 len(self.suitesfailed))
 
     def skip_testsuite(self, name, reason="UNKNOWN"):