Fix handling of unexpected successes and failures in subunithelper.
[kai/samba-autobuild/.git] / selftest / subunithelper.py
index 5d2d6658259b27b92b9ff82a7783758d1029ec18..df6eb7c14d84099a53de31f592cdc4eacd49e27e 100644 (file)
@@ -19,20 +19,21 @@ __all__ = ['parse_results']
 
 import re
 import sys
-import subunit
-import subunit.iso8601
-import testtools
+from samba import subunit
+from samba.subunit.run import TestProtocolClient
+from subunit import iso8601
+import unittest
 
-VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error']
+VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error', 'uxsuccess', 'testsuite-uxsuccess']
 
-class TestsuiteEnabledTestResult(testtools.testresult.TestResult):
+class TestsuiteEnabledTestResult(unittest.TestResult):
 
     def start_testsuite(self, name):
         raise NotImplementedError(self.start_testsuite)
 
 
 def parse_results(msg_ops, statistics, fh):
-    expected_fail = 0
+    exitcode = 0
     open_tests = {}
 
     while fh:
@@ -56,7 +57,7 @@ def parse_results(msg_ops, statistics, fh):
         elif command == "time":
             msg_ops.control_msg(l)
             try:
-                dt = subunit.iso8601.parse_date(arg.rstrip("\n"))
+                dt = iso8601.parse_date(arg.rstrip("\n"))
             except TypeError, e:
                 print "Unable to parse time line: %s" % arg.rstrip("\n")
             else:
@@ -95,6 +96,7 @@ def parse_results(msg_ops, statistics, fh):
                     test = open_tests.pop(testname)
                 except KeyError:
                     statistics['TESTS_ERROR']+=1
+                    exitcode = 1
                     msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_EXPECTED_OK']+=1
@@ -104,19 +106,32 @@ def parse_results(msg_ops, statistics, fh):
                     test = open_tests.pop(testname)
                 except KeyError:
                     statistics['TESTS_ERROR']+=1
+                    exitcode = 1
                     msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_EXPECTED_FAIL']+=1
                     msg_ops.addExpectedFailure(test, remote_error)
-                    expected_fail+=1
+            elif result in ("uxsuccess", ):
+                try:
+                    test = open_tests.pop(testname)
+                except KeyError:
+                    statistics['TESTS_ERROR']+=1
+                    exitcode = 1
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
+                else:
+                    statistics['TESTS_UNEXPECTED_OK']+=1
+                    msg_ops.addUnexpectedSuccess(test, remote_error)
+                    exitcode = 1
             elif result in ("failure", "fail"):
                 try:
                     test = open_tests.pop(testname)
                 except KeyError:
                     statistics['TESTS_ERROR']+=1
+                    exitcode = 1
                     msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_UNEXPECTED_FAIL']+=1
+                    exitcode = 1
                     msg_ops.addFailure(test, remote_error)
             elif result == "skip":
                 statistics['TESTS_SKIP']+=1
@@ -128,6 +143,7 @@ def parse_results(msg_ops, statistics, fh):
                 msg_ops.addSkip(test, reason)
             elif result == "error":
                 statistics['TESTS_ERROR']+=1
+                exitcode = 1
                 try:
                     test = open_tests.pop(testname)
                 except KeyError:
@@ -139,10 +155,15 @@ def parse_results(msg_ops, statistics, fh):
                 msg_ops.end_testsuite(testname, "success", reason)
             elif result == "testsuite-failure":
                 msg_ops.end_testsuite(testname, "failure", reason)
+                exitcode = 1
             elif result == "testsuite-xfail":
                 msg_ops.end_testsuite(testname, "xfail", reason)
+            elif result == "testsuite-uxsuccess":
+                msg_ops.end_testsuite(testname, "uxsuccess", reason)
+                exitcode = 1
             elif result == "testsuite-error":
                 msg_ops.end_testsuite(testname, "error", reason)
+                exitcode = 1
             else:
                 raise AssertionError("Recognized but unhandled result %r" %
                     result)
@@ -165,15 +186,15 @@ def parse_results(msg_ops, statistics, fh):
         test = subunit.RemotedTestCase(open_tests.popitem()[1])
         msg_ops.addError(test, subunit.RemoteError(u"was started but never finished!"))
         statistics['TESTS_ERROR']+=1
+        exitcode = 1
+
+    return exitcode
 
-    if statistics['TESTS_ERROR'] > 0:
-        return 1
-    if statistics['TESTS_UNEXPECTED_FAIL'] > 0:
-        return 1
-    return 0
 
+class SubunitOps(TestProtocolClient,TestsuiteEnabledTestResult):
 
-class SubunitOps(subunit.TestProtocolClient,TestsuiteEnabledTestResult):
+    def progress(self, count, whence):
+        pass
 
     # The following are Samba extensions:
     def start_testsuite(self, name):
@@ -229,7 +250,7 @@ class ImmediateFail(Exception):
         super(ImmediateFail, self).__init__("test failed and fail_immediately set")
 
 
-class FilterOps(testtools.testresult.TestResult):
+class FilterOps(unittest.TestResult):
 
     def control_msg(self, msg):
         pass # We regenerate control messages, so ignore this
@@ -255,55 +276,78 @@ class FilterOps(testtools.testresult.TestResult):
         self._ops.startTest(test)
 
     def _add_prefix(self, test):
+        prefix = ""
+        suffix = ""
         if self.prefix is not None:
-            return subunit.RemotedTestCase(self.prefix + test.id())
-        else:
-            return test
+            prefix = self.prefix
+        if self.suffix is not None:
+            suffix = self.suffix
 
-    def addError(self, test, details=None):
+        return subunit.RemotedTestCase(prefix + test.id() + suffix)
+
+    def addError(self, test, err=None):
         test = self._add_prefix(test)
         self.error_added+=1
         self.total_error+=1
-        self._ops.addError(test, details)
+        self._ops.addError(test, err)
         self.output = None
         if self.fail_immediately:
             raise ImmediateFail()
 
-    def addSkip(self, test, details=None):
+    def addSkip(self, test, reason=None):
         self.seen_output = True
         test = self._add_prefix(test)
-        self._ops.addSkip(test, details)
+        self._ops.addSkip(test, reason)
         self.output = None
 
-    def addExpectedFailure(self, test, details=None):
+    def addExpectedFailure(self, test, err=None):
         test = self._add_prefix(test)
-        self._ops.addExpectedFailure(test, details)
+        self._ops.addExpectedFailure(test, err)
         self.output = None
 
-    def addFailure(self, test, details=None):
+    def addUnexpectedSuccess(self, test, err=None):
+        test = self._add_prefix(test)
+        self.uxsuccess_added+=1
+        self.total_uxsuccess+=1
+        self._ops.addUnexpectedSuccess(test, err)
+        if self.output:
+            self._ops.output_msg(self.output)
+        self.output = None
+        if self.fail_immediately:
+            raise ImmediateFail()
+
+    def addFailure(self, test, err=None):
         test = self._add_prefix(test)
         xfail_reason = find_in_list(self.expected_failures, test.id())
+        if xfail_reason is None:
+            xfail_reason = find_in_list(self.flapping, test.id())
         if xfail_reason is not None:
             self.xfail_added+=1
             self.total_xfail+=1
-            if details is not None:
-                details = subunit.RemoteError(unicode(details[1]) + xfail_reason.decode("utf-8"))
-            else:
-                details = subunit.RemoteError(xfail_reason.decode("utf-8"))
-            self._ops.addExpectedFailure(test, details)
+            self._ops.addExpectedFailure(test, err)
         else:
             self.fail_added+=1
             self.total_fail+=1
-            self._ops.addFailure(test, details)
+            self._ops.addFailure(test, err)
             if self.output:
                 self._ops.output_msg(self.output)
             if self.fail_immediately:
                 raise ImmediateFail()
         self.output = None
 
-    def addSuccess(self, test, details=None):
+    def addSuccess(self, test):
         test = self._add_prefix(test)
-        self._ops.addSuccess(test, details)
+        xfail_reason = find_in_list(self.expected_failures, test.id())
+        if xfail_reason is not None:
+            self.uxsuccess_added += 1
+            self.total_uxsuccess += 1
+            self._ops.addUnexpectedSuccess(test, subunit.RemoteError(xfail_reason))
+            if self.output:
+                self._ops.output_msg(self.output)
+            if self.fail_immediately:
+                raise ImmediateFail()
+        else:
+            self._ops.addSuccess(test)
         self.output = None
 
     def skip_testsuite(self, name, reason=None):
@@ -314,18 +358,25 @@ class FilterOps(testtools.testresult.TestResult):
         self.error_added = 0
         self.fail_added = 0
         self.xfail_added = 0
+        self.uxsuccess_added = 0
 
     def end_testsuite(self, name, result, reason=None):
         xfail = False
 
         if self.xfail_added > 0:
             xfail = True
-        if self.fail_added > 0 or self.error_added > 0:
+        if self.fail_added > 0 or self.error_added > 0 or self.uxsuccess_added > 0:
             xfail = False
 
         if xfail and result in ("fail", "failure"):
             result = "xfail"
 
+        if self.uxsuccess_added > 0 and result != "uxsuccess":
+            result = "uxsuccess"
+            if reason is None:
+                reason = "Subunit/Filter Reason"
+            reason += "\n uxsuccess[%d]" % self.uxsuccess_added
+
         if self.fail_added > 0 and result != "failure":
             result = "failure"
             if reason is None:
@@ -339,23 +390,37 @@ class FilterOps(testtools.testresult.TestResult):
             reason += "\n errors[%d]" % self.error_added
 
         self._ops.end_testsuite(name, result, reason)
+        if result not in ("success", "xfail"):
+            if self.output:
+                self._ops.output_msg(self.output)
+            if self.fail_immediately:
+                raise ImmediateFail()
+        self.output = None
 
-    def __init__(self, out, prefix=None, expected_failures=None,
-                 strip_ok_output=False, fail_immediately=False):
+    def __init__(self, out, prefix=None, suffix=None, expected_failures=None,
+                 strip_ok_output=False, fail_immediately=False,
+                 flapping=None):
         self._ops = out
         self.seen_output = False
         self.output = None
         self.prefix = prefix
+        self.suffix = suffix
         if expected_failures is not None:
             self.expected_failures = expected_failures
         else:
             self.expected_failures = {}
+        if flapping is not None:
+            self.flapping = flapping
+        else:
+            self.flapping = {}
         self.strip_ok_output = strip_ok_output
         self.xfail_added = 0
         self.fail_added = 0
+        self.uxsuccess_added = 0
         self.total_xfail = 0
         self.total_error = 0
         self.total_fail = 0
+        self.total_uxsuccess = 0
         self.error_added = 0
         self.fail_immediately = fail_immediately
 
@@ -414,11 +479,17 @@ class PlainFormatter(TestsuiteEnabledTestResult):
         if not self.verbose:
             self.test_output[name] = ""
 
-        out = "[%d" % self.index
+        total_tests = (self.statistics['TESTS_EXPECTED_OK'] +
+                       self.statistics['TESTS_EXPECTED_FAIL'] +
+                       self.statistics['TESTS_ERROR'] +
+                       self.statistics['TESTS_UNEXPECTED_FAIL'] +
+                       self.statistics['TESTS_UNEXPECTED_OK'])
+
+        out = "[%d(%d)" % (self.index, total_tests)
         if self.totalsuites is not None:
             out += "/%d" % self.totalsuites
         if self.start_time is not None:
-            out += " in " + self._format_time(self.last_time - self.start_time)
+            out += " at " + self._format_time(self.last_time - self.start_time)
         if self.suitesfailed:
             out += ", %d errors" % (len(self.suitesfailed),)
         out += "] %s" % name
@@ -470,19 +541,22 @@ class PlainFormatter(TestsuiteEnabledTestResult):
     def addSuccess(self, test):
         self.end_test(test.id(), "success", False)
 
-    def addError(self, test, details=None):
-        self.end_test(test.id(), "error", True, details)
+    def addError(self, test, err=None):
+        self.end_test(test.id(), "error", True, err)
+
+    def addFailure(self, test, err=None):
+        self.end_test(test.id(), "failure", True, err)
 
-    def addFailure(self, test, details=None):
-        self.end_test(test.id(), "failure", True, details)
+    def addSkip(self, test, reason=None):
+        self.end_test(test.id(), "skip", False, reason)
 
-    def addSkip(self, test, details=None):
-        self.end_test(test.id(), "skip", False, details)
+    def addExpectedFailure(self, test, err=None):
+        self.end_test(test.id(), "xfail", False, err)
 
-    def addExpectedFail(self, test, details=None):
-        self.end_test(test.id(), "xfail", False, details)
+    def addUnexpectedSuccess(self, test):
+        self.end_test(test.id(), "uxsuccess", True)
 
-    def end_test(self, testname, result, unexpected, reason=None):
+    def end_test(self, testname, result, unexpected, err=None):
         if not unexpected:
             self.test_output[self.name] = ""
             if not self.immediate:
@@ -497,17 +571,18 @@ class PlainFormatter(TestsuiteEnabledTestResult):
             self.test_output[self.name] = ""
 
         self.test_output[self.name] += "UNEXPECTED(%s): %s\n" % (result, testname)
-        if reason is not None:
-            self.test_output[self.name] += "REASON: %s\n" % (unicode(reason[1]).encode("utf-8").strip(),)
+        if err is not None:
+            self.test_output[self.name] += "REASON: %s\n" % str(err[1]).strip()
 
         if self.immediate and not self.verbose:
-            print self.test_output[self.name]
+            sys.stdout.write(self.test_output[self.name])
             self.test_output[self.name] = ""
 
         if not self.immediate:
             sys.stdout.write({
                'error': 'E',
                'failure': 'F',
+               'uxsuccess': 'U',
                'success': 'S'}.get(result, "?"))
 
     def write_summary(self, path):
@@ -541,14 +616,16 @@ class PlainFormatter(TestsuiteEnabledTestResult):
 
         if (not self.suitesfailed and
             not self.statistics['TESTS_UNEXPECTED_FAIL'] and
+            not self.statistics['TESTS_UNEXPECTED_OK'] and
             not self.statistics['TESTS_ERROR']):
             ok = (self.statistics['TESTS_EXPECTED_OK'] +
                   self.statistics['TESTS_EXPECTED_FAIL'])
             print "\nALL OK (%d tests in %d testsuites)" % (ok, self.suites_ok)
         else:
-            print "\nFAILED (%d failures and %d errors in %d testsuites)" % (
+            print "\nFAILED (%d failures, %d errors and %d unexpected successes in %d testsuites)" % (
                 self.statistics['TESTS_UNEXPECTED_FAIL'],
                 self.statistics['TESTS_ERROR'],
+                self.statistics['TESTS_UNEXPECTED_OK'],
                 len(self.suitesfailed))
 
     def skip_testsuite(self, name, reason="UNKNOWN"):