selftest: Convert format-subunit to Python.
authorJelmer Vernooij <jelmer@samba.org>
Mon, 29 Mar 2010 20:25:25 +0000 (22:25 +0200)
committerJelmer Vernooij <jelmer@samba.org>
Mon, 29 Mar 2010 23:08:20 +0000 (01:08 +0200)
selftest/format-subunit
selftest/subunithelper.py [new file with mode: 0644]

index 2224b71..1967fb4 100755 (executable)
@@ -1,77 +1,38 @@
#!/usr/bin/env python
# Pretty-format subunit output
# Copyright (C) 2008-2010 Jelmer Vernooij <jelmer@samba.org>
# Published under the GNU GPL, v3 or later

import optparse
import os
import sys

import subunithelper

parser = optparse.OptionParser("format-subunit [options]")
parser.add_option("--verbose", action="store_true",
    help="Be verbose")
parser.add_option("--immediate", action="store_true",
    help="Show failures immediately, don't wait until test run has finished")
parser.add_option("--prefix", type="string", default=".",
    help="Prefix to write summary to")

opts, args = parser.parse_args()

# Counters updated in place by the subunit parser; keys mirror the Perl
# original's statistics hash.
statistics = {
    'SUITES_FAIL': 0,
    'TESTS_UNEXPECTED_OK': 0,
    'TESTS_EXPECTED_OK': 0,
    'TESTS_UNEXPECTED_FAIL': 0,
    'TESTS_EXPECTED_FAIL': 0,
    'TESTS_ERROR': 0,
    'TESTS_SKIP': 0,
}

# NOTE(review): the original referenced a bare `PlainFormatter`, which is
# neither defined nor imported in this script (NameError at runtime).
# Assuming the formatter class lives in subunithelper alongside
# parse_results -- confirm it is defined there.
msg_ops = subunithelper.PlainFormatter(os.path.join(opts.prefix, "summary"),
                                       opts.verbose, opts.immediate,
                                       statistics)

# Parse the subunit stream from stdin, driving the formatter callbacks.
expected_ret = subunithelper.parse_results(msg_ops, statistics, sys.stdin)

msg_ops.summary()

sys.exit(expected_ret)
diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py
new file mode 100644 (file)
index 0000000..3cd0f01
--- /dev/null
@@ -0,0 +1,170 @@
+# Python module for parsing and generating the Subunit protocol
+# (Samba-specific)
+# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+__all__ = ['parse_results']
+
+import re
+import time
+
VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error']

def parse_results(msg_ops, statistics, fh):
    """Parse a subunit stream from fh, driving the msg_ops formatter.

    :param msg_ops: formatter object providing control_msg/output_msg/
        start_test/end_test/report_time/start_testsuite/end_testsuite/
        skip_testsuite/testsuite_count callbacks
    :param statistics: dict of TESTS_* counters, updated in place
    :param fh: file-like object supporting readline()
    :return: 0 on success, 1 if any test errored or failed unexpectedly
    """
    expected_fail = 0
    open_tests = []
    # Compile the result-line pattern once; it matches e.g. "failure: name",
    # with an optional trailing " [" announcing a bracketed multi-line reason.
    result_re = re.compile(
        "^(" + "|".join(VALID_RESULTS) + "): (.*?)( \[)?([ \t]*)( multipart)?\n")

    while True:
        l = fh.readline()
        # readline() returns "" at EOF; the original `while fh:` never
        # terminated because a file object is always truthy.
        if l == "":
            break
        if l.startswith("test: "):
            msg_ops.control_msg(l)
            name = l.split(":", 1)[1].strip()
            msg_ops.start_test(name)
            open_tests.append(name)
        elif l.startswith("time: "):
            m = re.match(r"^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)\n", l)
            if m is not None:
                (year, month, day, hour, minute, second) = \
                    [int(x) for x in m.groups()]
                # mktime() takes a 9-field struct_time-style tuple with a full
                # year and 1-based month (the Perl-style -1/-1900 offsets in
                # the original were wrong here); tm_isdst=-1 lets libc guess.
                msg_ops.report_time(time.mktime(
                    (year, month, day, hour, minute, second, 0, 0, -1)))
        elif result_re.match(l):
            msg_ops.control_msg(l)
            m = result_re.match(l)
            # The original tuple-unpacked the Match object itself (TypeError);
            # pull the groups out explicitly.
            result = m.group(1)
            testname = m.group(2)
            reason = None
            if m.group(3):
                # "result: name [" -- the reason follows on subsequent lines,
                # terminated by a lone "]".
                reason = ""
                terminated = False
                while True:
                    l = fh.readline()
                    if l == "":
                        break
                    msg_ops.control_msg(l)
                    if l == "]\n":
                        terminated = True
                        break
                    reason += l

                if not terminated:
                    statistics['TESTS_ERROR'] += 1
                    msg_ops.end_test(testname, "error", 1,
                                     "reason (%s) interrupted" % result)
                    return 1
            if result in ("success", "successful"):
                if open_tests:
                    open_tests.pop()  # FIXME: check popped value == testname
                statistics['TESTS_EXPECTED_OK'] += 1
                msg_ops.end_test(testname, "success", 0, reason)
            elif result in ("xfail", "knownfail"):
                if open_tests:
                    open_tests.pop()  # FIXME: check popped value == testname
                statistics['TESTS_EXPECTED_FAIL'] += 1
                msg_ops.end_test(testname, "xfail", 0, reason)
                expected_fail += 1
            elif result in ("failure", "fail"):
                if open_tests:
                    open_tests.pop()  # FIXME: check popped value == testname
                statistics['TESTS_UNEXPECTED_FAIL'] += 1
                msg_ops.end_test(testname, "failure", 1, reason)
            elif result == "skip":
                statistics['TESTS_SKIP'] += 1
                # Allow tests to be skipped without prior announcement; if the
                # open test isn't ours, push it back (the original re-pushed
                # testname, dropping the still-open test).
                last = open_tests.pop() if open_tests else None
                if last is not None and last != testname:
                    open_tests.append(last)
                msg_ops.end_test(testname, "skip", 0, reason)
            elif result == "error":
                statistics['TESTS_ERROR'] += 1
                if open_tests:
                    open_tests.pop()  # FIXME: check popped value == testname
                msg_ops.end_test(testname, "error", 1, reason)
            elif result == "skip-testsuite":
                msg_ops.skip_testsuite(testname)
            elif result == "testsuite-success":
                msg_ops.end_testsuite(testname, "success", reason)
            elif result == "testsuite-failure":
                msg_ops.end_testsuite(testname, "failure", reason)
            elif result == "testsuite-xfail":
                msg_ops.end_testsuite(testname, "xfail", reason)
            elif result == "testsuite-error":
                msg_ops.end_testsuite(testname, "error", reason)
        elif l.startswith("testsuite: "):
            msg_ops.start_testsuite(l.split(":", 1)[1].strip())
        elif l.startswith("testsuite-count: "):
            msg_ops.testsuite_count(int(l.split(":", 1)[1].strip()))
        else:
            msg_ops.output_msg(l)

    # Anything still open was started but never reported a result.
    while open_tests:
        msg_ops.end_test(open_tests.pop(), "error", 1,
                         "was started but never finished!")
        statistics['TESTS_ERROR'] += 1

    # A filtering formatter keeps its own authoritative totals. The Perl code
    # probed $msg_ops->{total_error}; `'total_error' in msg_ops` raises
    # TypeError on a plain object, so probe attributes instead.
    if hasattr(msg_ops, 'total_error'):
        statistics['TESTS_ERROR'] = msg_ops.total_error
        statistics['TESTS_UNEXPECTED_FAIL'] = msg_ops.total_fail
        statistics['TESTS_EXPECTED_FAIL'] = msg_ops.total_xfail

    if statistics['TESTS_ERROR'] > 0:
        return 1
    if statistics['TESTS_UNEXPECTED_FAIL'] > 0:
        return 1
    return 0
+
+
def start_test(testname):
    """Emit a subunit "test:" start line for testname."""
    # Parenthesized single-argument print works identically under both
    # Python 2 and 3; the original bare print statement is py2-only.
    print("test: %s" % testname)
+
def end_test(name, result, reason=None):
    """Emit a subunit result line; a non-empty reason is bracketed [ ... ].

    :param name: test name
    :param result: subunit result keyword (success/failure/skip/...)
    :param reason: optional multi-line explanation
    """
    # Parenthesized single-argument print works identically under both
    # Python 2 and 3; the original bare print statements are py2-only.
    if reason:
        print("%s: %s [" % (result, name))
        print("%s" % reason)
        print("]")
    else:
        print("%s: %s" % (result, name))
+
+
def skip_test(name, reason=None):
    # Convenience wrapper: report the named test as skipped.
    end_test(name, "skip", reason)
+
+
def fail_test(name, reason=None):
    # Convenience wrapper: report the named test as failed.
    end_test(name, "fail", reason)
+
+
def success_test(name, reason=None):
    # Convenience wrapper: report the named test as succeeded.
    end_test(name, "success", reason)
+
def xfail_test(name, reason=None):
    # Convenience wrapper: report the named test as an expected failure.
    end_test(name, "xfail", reason)
+
def report_time(t):
    """Emit a subunit "time:" stamp for epoch seconds t, in local time."""
    # Python's localtime() yields (year, mon, mday, hour, min, sec, ...) with
    # a full year and 1-based month -- unlike Perl's localtime, which the
    # original mirrored (reversed field order, +1900/+1 offsets, and a
    # `localtimet` typo that raised AttributeError).
    (year, mon, mday, hour, minute, sec) = time.localtime(t)[:6]
    print("time: %04d-%02d-%02d %02d:%02d:%02d" %
          (year, mon, mday, hour, minute, sec))
+
+
+# The following are Samba extensions:
def start_testsuite(name):
    """Emit a Samba-extension "testsuite:" start line."""
    # Parenthesized single-argument print works identically under both
    # Python 2 and 3; the original bare print statement is py2-only.
    print("testsuite: %s" % name)
+
def skip_testsuite(name, reason=None):
    """Emit a Samba-extension "skip-testsuite:" line, reason bracketed."""
    # Parenthesized single-argument print works identically under both
    # Python 2 and 3; the original bare print statements are py2-only.
    if reason:
        print("skip-testsuite: %s [\n%s\n]" % (name, reason))
    else:
        print("skip-testsuite: %s" % name)
+
def end_testsuite(name, result, reason=None):
    """Emit a Samba-extension "testsuite-<result>:" line, reason bracketed.

    :param name: testsuite name
    :param result: result keyword (success/failure/xfail/error)
    :param reason: optional multi-line explanation
    """
    # The Perl original interpolated $result into the tag; the literal
    # "testsuite-$result" string was carried over unconverted, so every
    # line came out wrong. Substitute result explicitly.
    if reason:
        print("testsuite-%s: %s [" % (result, name))
        print("%s" % reason)
        print("]")
    else:
        print("testsuite-%s: %s" % (result, name))
+
+def testsuite_count(count):
+    print "testsuite-count: %d" % count