# Python module for parsing and generating the Subunit protocol
# (Samba-specific)
# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

__all__ = ['parse_results']

import re
import sys
import subunit
import subunit.iso8601
import testtools

VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip',
                 'knownfail', 'error', 'xfail', 'skip-testsuite',
                 'testsuite-failure', 'testsuite-xfail',
                 'testsuite-success', 'testsuite-error']

class TestsuiteEnabledTestResult(testtools.testresult.TestResult):
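    """TestResult base class that adds the Samba start_testsuite extension."""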

    def start_testsuite(self, name):
        raise NotImplementedError(self.start_testsuite)


def parse_results(msg_ops, statistics, fh):
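    """Parse subunit lines from fh and relay the events to msg_ops.

    :param msg_ops: result object that receives the parsed events
    :param statistics: dict whose TESTS_* counters are updated in place
    :param fh: file-like object to read subunit lines from
    :return: 1 if errors or unexpected failures were seen, 0 otherwise
    """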
    expected_fail = 0
    open_tests = {}

    while fh:
        l = fh.readline()
        if l == "":
            break
        parts = l.split(None, 1)
        if not len(parts) == 2 or not l.startswith(parts[0]):
            msg_ops.output_msg(l)
            continue
        command = parts[0].rstrip(":")
        arg = parts[1]
        if command in ("test", "testing"):
            msg_ops.control_msg(l)
            name = arg.rstrip()
            test = subunit.RemotedTestCase(name)
            if name in open_tests:
                msg_ops.addError(open_tests.pop(name), "Test already running")
            msg_ops.startTest(test)
            open_tests[name] = test
        elif command == "time":
            msg_ops.control_msg(l)
            try:
                dt = subunit.iso8601.parse_date(arg.rstrip("\n"))
            except TypeError:
                print("Unable to parse time line: %s" % arg.rstrip("\n"))
            else:
                msg_ops.time(dt)
        elif command in VALID_RESULTS:
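            # A result line is either "result: testname" or
            # "result: testname [", followed by reason lines and a closing "]".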
            msg_ops.control_msg(l)
            result = command
            grp = re.match(r"(.*?)( \[)?([ \t]*)( multipart)?\n", arg)
            (testname, hasreason) = (grp.group(1), grp.group(2))
            if hasreason:
                reason = ""
                # reason may be specified in next lines
                terminated = False
                while fh:
                    l = fh.readline()
                    if l == "":
                        break
                    msg_ops.control_msg(l)
                    if l == "]\n":
                        terminated = True
                        break
                    else:
                        reason += l

                if not terminated:
                    statistics['TESTS_ERROR'] += 1
                    msg_ops.addError(subunit.RemotedTestCase(testname), "reason (%s) interrupted" % result)
                    return 1
            else:
                reason = None
            if result in ("success", "successful"):
                try:
                    test = open_tests.pop(testname)
                except KeyError:
                    statistics['TESTS_ERROR'] += 1
                    msg_ops.addError(subunit.RemotedTestCase(testname), "Test was never started")
                else:
                    statistics['TESTS_EXPECTED_OK'] += 1
                    msg_ops.addSuccess(test, reason)
            elif result in ("xfail", "knownfail"):
                try:
                    test = open_tests.pop(testname)
                except KeyError:
                    statistics['TESTS_ERROR'] += 1
                    msg_ops.addError(subunit.RemotedTestCase(testname), "Test was never started")
                else:
                    statistics['TESTS_EXPECTED_FAIL'] += 1
                    msg_ops.addExpectedFail(test, reason)
                    expected_fail += 1
            elif result in ("failure", "fail"):
                try:
                    test = open_tests.pop(testname)
                except KeyError:
                    statistics['TESTS_ERROR'] += 1
                    msg_ops.addError(subunit.RemotedTestCase(testname), "Test was never started")
                else:
                    statistics['TESTS_UNEXPECTED_FAIL'] += 1
                    msg_ops.addFailure(test, reason)
            elif result == "skip":
                statistics['TESTS_SKIP'] += 1
                # Allow tests to be skipped without prior announcement of test
                try:
                    test = open_tests.pop(testname)
                except KeyError:
                    test = subunit.RemotedTestCase(testname)
                msg_ops.addSkip(test, reason)
            elif result == "error":
                statistics['TESTS_ERROR'] += 1
                try:
                    test = open_tests.pop(testname)
                except KeyError:
                    test = subunit.RemotedTestCase(testname)
                msg_ops.addError(test, reason)
            elif result == "skip-testsuite":
                msg_ops.skip_testsuite(testname)
            elif result == "testsuite-success":
                msg_ops.end_testsuite(testname, "success", reason)
            elif result == "testsuite-failure":
                msg_ops.end_testsuite(testname, "failure", reason)
            elif result == "testsuite-xfail":
                msg_ops.end_testsuite(testname, "xfail", reason)
            elif result == "testsuite-error":
                msg_ops.end_testsuite(testname, "error", reason)
            else:
                raise AssertionError("Recognized but unhandled result %r" %
                    result)
        elif command == "testsuite":
            msg_ops.start_testsuite(arg.strip())
        elif command == "progress":
            arg = arg.strip()
            if arg == "pop":
                msg_ops.progress(None, subunit.PROGRESS_POP)
            elif arg == "push":
                msg_ops.progress(None, subunit.PROGRESS_PUSH)
            elif arg[0] in '+-':
                msg_ops.progress(int(arg), subunit.PROGRESS_CUR)
            else:
                msg_ops.progress(int(arg), subunit.PROGRESS_SET)
        else:
            msg_ops.output_msg(l)

    while open_tests:
        (testname, test) = open_tests.popitem()
        msg_ops.end_test(testname, "error", True,
                         "was started but never finished!")
        statistics['TESTS_ERROR'] += 1

    if statistics['TESTS_ERROR'] > 0:
        return 1
    if statistics['TESTS_UNEXPECTED_FAIL'] > 0:
        return 1
    return 0


class SubunitOps(subunit.TestProtocolClient, TestsuiteEnabledTestResult):
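    """Subunit stream writer, including the Samba testsuite extensions."""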

    def addError(self, test, details=None):
        self.end_test(test.id(), "error", details)

    def addSuccess(self, test, details=None):
        self.end_test(test.id(), "success", details)

    def addExpectedFail(self, test, details=None):
        self.end_test(test.id(), "xfail", details)

    def addFailure(self, test, details=None):
        self.end_test(test.id(), "failure", details)

    def addSkip(self, test, details=None):
        self.end_test(test.id(), "skip", details)

    def end_test(self, name, result, reason=None):
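        """Write a result line, wrapping any reason in a [ ... ] block."""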
        if reason:
            self._stream.write("%s: %s [\n%s\n]\n" % (result, name, reason))
        else:
            self._stream.write("%s: %s\n" % (result, name))

    def skip_test(self, name, reason=None):
        self.end_test(name, "skip", reason)

    def fail_test(self, name, reason=None):
        self.end_test(name, "fail", reason)

    def success_test(self, name, reason=None):
        self.end_test(name, "success", reason)

    def xfail_test(self, name, reason=None):
        self.end_test(name, "xfail", reason)

    # The following are Samba extensions:
    def start_testsuite(self, name):
        self._stream.write("testsuite: %s\n" % name)

    def skip_testsuite(self, name, reason=None):
        if reason:
            self._stream.write("skip-testsuite: %s [\n%s\n]\n" % (name, reason))
        else:
            self._stream.write("skip-testsuite: %s\n" % name)

    def end_testsuite(self, name, result, reason=None):
        if reason:
            self._stream.write("testsuite-%s: %s [\n%s\n]\n" % (result, name, reason))
        else:
            self._stream.write("testsuite-%s: %s\n" % (result, name))


def read_test_regexes(name):
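    """Read a file with one test name regex per line.

    Blank lines and lines starting with '#' are ignored; an inline '#'
    separates the regex from an optional reason.

    :return: dict mapping each regex to its reason (or None)
    """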
    ret = {}
    f = open(name, 'r')
    try:
        for l in f:
            l = l.strip()
            if l == "" or l[0] == "#":
                continue
            if "#" in l:
                (regex, reason) = l.split("#", 1)
                ret[regex.strip()] = reason.strip()
            else:
                ret[l] = None
    finally:
        f.close()
    return ret


def find_in_list(regexes, fullname):
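    """Return the reason for the first regex matching fullname.

    Returns "" if the matching regex has no reason, or None if no
    regex matches at all.
    """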
    for regex, reason in regexes.items():
        if re.match(regex, fullname):
            if reason is None:
                return ""
            return reason
    return None


class FilterOps(testtools.testresult.TestResult):
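    """Result filter that rewrites events before passing them on.

    Optionally prefixes test names, remaps known failures to xfail and
    suppresses the output of successful tests.
    """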

    def control_msg(self, msg):
        pass  # We regenerate control messages, so ignore this

    def time(self, time):
        self._ops.time(time)

    def progress(self, delta, whence):
        self._ops.progress(delta, whence)

    def output_msg(self, msg):
        if self.output is None:
            sys.stdout.write(msg)
        else:
            self.output += msg

    def startTest(self, test):
        test = self._add_prefix(test)
        if self.strip_ok_output:
            self.output = ""

        self._ops.startTest(test)

    def _add_prefix(self, test):
        if self.prefix is not None:
            return subunit.RemotedTestCase(self.prefix + test.id())
        else:
            return test

    def addError(self, test, details=None):
        test = self._add_prefix(test)
        self.end_test(test.id(), "error", True, details)

    def addSkip(self, test, details=None):
        test = self._add_prefix(test)
        self.end_test(test.id(), "skip", False, details)

    def addExpectedFail(self, test, details=None):
        test = self._add_prefix(test)
        self.end_test(test.id(), "xfail", False, details)

    def addFailure(self, test, details=None):
        test = self._add_prefix(test)
        self.end_test(test.id(), "failure", True, details)

    def addSuccess(self, test, details=None):
        test = self._add_prefix(test)
        self.end_test(test.id(), "success", False, details)

    def end_test(self, testname, result, unexpected, reason):
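        """Forward a result, remapping failures that were expected to xfail."""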
        if result in ("fail", "failure") and not unexpected:
            result = "xfail"
            self.xfail_added += 1
            self.total_xfail += 1
        xfail_reason = find_in_list(self.expected_failures, testname)
        if xfail_reason is not None and result in ("fail", "failure"):
            result = "xfail"
            self.xfail_added += 1
            self.total_xfail += 1
            if reason is None:
                reason = xfail_reason
            else:
                reason += xfail_reason

        if result in ("fail", "failure"):
            self.fail_added += 1
            self.total_fail += 1

        if result == "error":
            self.error_added += 1
            self.total_error += 1

        if self.strip_ok_output:
            if result not in ("success", "xfail", "skip"):
                print(self.output)
        self.output = None

        self._ops.end_test(testname, result, reason)

    def skip_testsuite(self, name, reason=None):
        self._ops.skip_testsuite(name, reason)

    def start_testsuite(self, name):
        self._ops.start_testsuite(name)

        self.error_added = 0
        self.fail_added = 0
        self.xfail_added = 0

    def end_testsuite(self, name, result, reason=None):
        xfail = False

        if self.xfail_added > 0:
            xfail = True
        if self.fail_added > 0 or self.error_added > 0:
            xfail = False

        if xfail and result in ("fail", "failure"):
            result = "xfail"

        if self.fail_added > 0 and result != "failure":
            result = "failure"
            if reason is None:
                reason = "Subunit/Filter Reason"
            reason += "\n failures[%d]" % self.fail_added

        if self.error_added > 0 and result != "error":
            result = "error"
            if reason is None:
                reason = "Subunit/Filter Reason"
            reason += "\n errors[%d]" % self.error_added

        self._ops.end_testsuite(name, result, reason)

    def __init__(self, out, prefix, expected_failures, strip_ok_output):
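        """Wrap the result object out.

        :param prefix: optional prefix prepended to every test name
        :param expected_failures: dict of regex -> reason, as returned by
            read_test_regexes()
        :param strip_ok_output: if true, suppress output from passing tests
        """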
        self._ops = out
        self.output = None
        self.prefix = prefix
        self.expected_failures = expected_failures
        self.strip_ok_output = strip_ok_output
        self.xfail_added = 0
        self.total_xfail = 0
        self.total_error = 0
        self.total_fail = 0