diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py
index 33287412468da807e3404e0fd40811c11e2c53b3..4a649c3b5fb7d842a6a553e3571f72ba2c43f765 100644
--- a/selftest/subunithelper.py
+++ b/selftest/subunithelper.py
@@ -33,7 +33,7 @@ class TestsuiteEnabledTestResult(testtools.testresult.TestResult):
 
 def parse_results(msg_ops, statistics, fh):
     expected_fail = 0
-    open_tests = []
+    open_tests = {}
 
     while fh:
         l = fh.readline()
@@ -47,8 +47,12 @@ def parse_results(msg_ops, statistics, fh):
         arg = parts[1]
         if command in ("test", "testing"):
             msg_ops.control_msg(l)
-            msg_ops.startTest(subunit.RemotedTestCase(arg.rstrip()))
-            open_tests.append(arg.rstrip())
+            name = arg.rstrip()
+            test = subunit.RemotedTestCase(name)
+            if name in open_tests:
+                msg_ops.addError(open_tests.pop(name), subunit.RemoteError(u"Test already running"))
+            msg_ops.startTest(test)
+            open_tests[name] = test
         elif command == "time":
             msg_ops.control_msg(l)
             try:
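
The hunk above replaces the open_tests list with a dict keyed by test name, so a repeated "test:" line for a name that is still running can be reported as an error, and later result lines can pop the matching test object by name. A minimal sketch of that bookkeeping, assuming msg_ops implements the TestResult callbacks used elsewhere in this patch (the helper name handle_test_line is invented for illustration):

    import subunit

    def handle_test_line(msg_ops, open_tests, name):
        # open_tests maps test name -> the RemotedTestCase announced at "test:" time.
        test = subunit.RemotedTestCase(name)
        if name in open_tests:
            # The same name was started twice: close the first instance as an error.
            msg_ops.addError(open_tests.pop(name),
                             subunit.RemoteError(u"Test already running"))
        msg_ops.startTest(test)
        open_tests[name] = test
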
@@ -77,58 +81,58 @@ def parse_results(msg_ops, statistics, fh):
                     else:
                         reason += l
 
+                remote_error = subunit.RemoteError(reason.decode("utf-8"))
+
                 if not terminated:
                     statistics['TESTS_ERROR']+=1
-                    msg_ops.end_test(testname, "error", True, 
-                                       "reason (%s) interrupted" % result)
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"reason (%s) interrupted" % result))
                     return 1
             else:
                 reason = None
+                remote_error = subunit.RemoteError(u"No reason specified")
             if result in ("success", "successful"):
                 try:
-                    open_tests.remove(testname)
-                except ValueError:
+                    test = open_tests.pop(testname)
+                except KeyError:
                     statistics['TESTS_ERROR']+=1
-                    msg_ops.end_test(testname, "error", True, 
-                            "Test was never started")
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_EXPECTED_OK']+=1
-                    msg_ops.end_test(testname, "success", False, reason)
+                    msg_ops.addSuccess(test)
             elif result in ("xfail", "knownfail"):
                 try:
-                    open_tests.remove(testname)
-                except ValueError:
+                    test = open_tests.pop(testname)
+                except KeyError:
                     statistics['TESTS_ERROR']+=1
-                    msg_ops.end_test(testname, "error", True, 
-                            "Test was never started")
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_EXPECTED_FAIL']+=1
-                    msg_ops.end_test(testname, "xfail", False, reason)
+                    msg_ops.addExpectedFailure(test, remote_error)
                     expected_fail+=1
             elif result in ("failure", "fail"):
                 try:
-                    open_tests.remove(testname)
-                except ValueError:
+                    test = open_tests.pop(testname)
+                except KeyError:
                     statistics['TESTS_ERROR']+=1
-                    msg_ops.end_test(testname, "error", True, 
-                            "Test was never started")
+                    msg_ops.addError(subunit.RemotedTestCase(testname), subunit.RemoteError(u"Test was never started"))
                 else:
                     statistics['TESTS_UNEXPECTED_FAIL']+=1
-                    msg_ops.end_test(testname, "failure", True, reason)
+                    msg_ops.addFailure(test, remote_error)
             elif result == "skip":
                 statistics['TESTS_SKIP']+=1
                 # Allow tests to be skipped without prior announcement of test
-                last = open_tests.pop()
-                if last is not None and last != testname:
-                    open_tests.append(testname)
-                msg_ops.end_test(testname, "skip", False, reason)
+                try:
+                    test = open_tests.pop(testname)
+                except KeyError:
+                    test = subunit.RemotedTestCase(testname)
+                msg_ops.addSkip(test, reason)
             elif result == "error":
                 statistics['TESTS_ERROR']+=1
                 try:
-                    open_tests.remove(testname)
-                except ValueError:
-                    pass
-                msg_ops.end_test(testname, "error", True, reason)
+                    test = open_tests.pop(testname)
+                except KeyError:
+                    test = subunit.RemotedTestCase(testname)
+                msg_ops.addError(test, remote_error)
             elif result == "skip-testsuite":
                 msg_ops.skip_testsuite(testname)
             elif result == "testsuite-success":
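
The result handling above moves from the legacy end_test() strings to the testtools/subunit TestResult callbacks: addSuccess, addExpectedFailure, addFailure, addSkip and addError. Where a reason was parsed from the stream it is carried as subunit.RemoteError(...), which in the python-subunit of this era packages a message into the (exc_class, exc_value, traceback) shape those callbacks accept in place of a real sys.exc_info(). A condensed, hedged sketch of the dispatch (report_result is an invented helper, not part of the patch):

    import subunit

    def report_result(msg_ops, statistics, test, result, reason=None):
        # A textual reason from the stream travels as a RemoteError tuple.
        err = subunit.RemoteError(reason or u"No reason specified")
        if result in ("success", "successful"):
            statistics['TESTS_EXPECTED_OK'] += 1
            msg_ops.addSuccess(test)
        elif result in ("xfail", "knownfail"):
            statistics['TESTS_EXPECTED_FAIL'] += 1
            msg_ops.addExpectedFailure(test, err)
        elif result in ("failure", "fail"):
            statistics['TESTS_UNEXPECTED_FAIL'] += 1
            msg_ops.addFailure(test, err)
        elif result == "skip":
            statistics['TESTS_SKIP'] += 1
            msg_ops.addSkip(test, reason)
        else:
            statistics['TESTS_ERROR'] += 1
            msg_ops.addError(test, err)
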
@@ -158,37 +162,19 @@ def parse_results(msg_ops, statistics, fh):
             msg_ops.output_msg(l)
 
     while open_tests:
-        msg_ops.end_test(open_tests.pop(), "error", True,
-                   "was started but never finished!")
+        test = open_tests.popitem()[1]
+        msg_ops.addError(test, subunit.RemoteError(u"was started but never finished!"))
         statistics['TESTS_ERROR']+=1
 
     if statistics['TESTS_ERROR'] > 0:
         return 1
     if statistics['TESTS_UNEXPECTED_FAIL'] > 0:
-        return 1 
+        return 1
     return 0
 
 
 class SubunitOps(subunit.TestProtocolClient,TestsuiteEnabledTestResult):
 
-    def end_test(self, name, result, reason=None):
-        if reason:
-            self._stream.write("%s: %s [\n%s\n]\n" % (result, name, reason))
-        else:
-            self._stream.write("%s: %s\n" % (result, name))
-
-    def skip_test(self, name, reason=None):
-        self.end_test(name, "skip", reason)
-
-    def fail_test(self, name, reason=None):
-        self.end_test(name, "fail", reason)
-
-    def success_test(self, name, reason=None):
-        self.end_test(name, "success", reason)
-
-    def xfail_test(self, name, reason=None):
-        self.end_test(name, "xfail", reason)
-
     # The following are Samba extensions:
     def start_testsuite(self, name):
         self._stream.write("testsuite: %s\n" % name)
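
With the hand-rolled end_test()/skip_test()/fail_test()/success_test()/xfail_test() helpers deleted above, SubunitOps now relies on the reporting methods it inherits from subunit.TestProtocolClient. Assuming the TestProtocolClient of that era (the same calls the rest of this patch already uses), the old helpers map roughly onto the inherited API as below; the test name is invented:

    import sys
    import subunit

    ops = subunit.TestProtocolClient(sys.stdout)
    test = subunit.RemotedTestCase("samba.tests.example")   # hypothetical name

    ops.startTest(test)
    ops.addSuccess(test)                                     # replaces success_test()
    # ops.addSkip(test, "reason")                            # replaces skip_test()
    # ops.addFailure(test, subunit.RemoteError(u"reason"))   # replaces fail_test()
    # ops.addExpectedFailure(test, subunit.RemoteError(u"reason"))  # replaces xfail_test()
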
@@ -205,6 +191,9 @@ class SubunitOps(subunit.TestProtocolClient,TestsuiteEnabledTestResult):
         else:
             self._stream.write("testsuite-%s: %s\n" % (result, name))
 
+    def output_msg(self, msg):
+        self._stream.write(msg)
+
 
 def read_test_regexes(name):
     ret = {}
@@ -251,43 +240,58 @@ class FilterOps(testtools.testresult.TestResult):
             self.output+=msg
 
     def startTest(self, test):
-        if self.prefix is not None:
-            test = subunit.RemotedTestCase(self.prefix + test.id())
-
+        test = self._add_prefix(test)
         if self.strip_ok_output:
            self.output = ""
 
         self._ops.startTest(test)
 
-    def end_test(self, testname, result, unexpected, reason):
+    def _add_prefix(self, test):
         if self.prefix is not None:
-            testname = self.prefix + testname
+            return subunit.RemotedTestCase(self.prefix + test.id())
+        else:
+            return test
 
-        if result in ("fail", "failure") and not unexpected:
-            result = "xfail"
-            self.xfail_added+=1
-            self.total_xfail+=1
-        xfail_reason = find_in_list(self.expected_failures, testname)
-        if xfail_reason is not None and result in ("fail", "failure"):
-            result = "xfail"
+    def addError(self, test, details=None):
+        test = self._add_prefix(test)
+        self.error_added+=1
+        self.total_error+=1
+        self._ops.addError(test, details)
+        self.output = None
+
+    def addSkip(self, test, details=None):
+        test = self._add_prefix(test)
+        self._ops.addSkip(test, details)
+        self.output = None
+
+    def addExpectedFailure(self, test, details=None):
+        test = self._add_prefix(test)
+        self._ops.addExpectedFailure(test, details)
+        self.output = None
+
+    def addFailure(self, test, details=None):
+        test = self._add_prefix(test)
+        xfail_reason = find_in_list(self.expected_failures, test.id())
+        if xfail_reason is not None:
             self.xfail_added+=1
             self.total_xfail+=1
-            reason += xfail_reason
-
-        if result in ("fail", "failure"):
+            if details is not None:
+                details = subunit.RemoteError(details[1].message + xfail_reason.decode("utf-8"))
+            else:
+                details = subunit.RemoteError(xfail_reason.decode("utf-8"))
+            self._ops.addExpectedFailure(test, details)
+        else:
             self.fail_added+=1
             self.total_fail+=1
-
-        if result == "error":
-            self.error_added+=1
-            self.total_error+=1
-
-        if self.strip_ok_output:
-            if result not in ("success", "xfail", "skip"):
-                print self.output
+            self._ops.addFailure(test, details)
+            if self.output:
+                self._ops.output_msg(self.output)
         self.output = None
 
-        self._ops.end_test(testname, result, reason)
+    def addSuccess(self, test, details=None):
+        test = self._add_prefix(test)
+        self._ops.addSuccess(test, details)
+        self.output = None
 
     def skip_testsuite(self, name, reason=None):
         self._ops.skip_testsuite(name, reason)
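
The rewritten FilterOps above is itself a testtools TestResult: it re-prefixes test ids, forwards events to the downstream result in self._ops, and in addFailure consults the expected-failures patterns loaded by read_test_regexes(), downgrading a matching failure to an expected failure with the configured reason attached. A standalone sketch of that decision; forward_failure and downstream are invented for illustration, while find_in_list() is this module's helper and the subunit calls come from the patch:

    import subunit

    def forward_failure(downstream, test, details, expected_failures):
        # expected_failures is the pattern -> reason mapping produced by
        # read_test_regexes(); find_in_list() returns the reason on a match.
        reason = find_in_list(expected_failures, test.id())
        if reason is not None:
            # Known failure: report it as expected, carrying the reason along.
            downstream.addExpectedFailure(
                test, subunit.RemoteError(reason.decode("utf-8")))
        else:
            downstream.addFailure(test, details)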