Fix up buffering to make pdb usable.
author: Robert Collins <robertc@robertcollins.net>
Sun, 3 Mar 2013 11:42:05 +0000 (00:42 +1300)
committer: Robert Collins <robertc@robertcollins.net>
Sun, 3 Mar 2013 11:42:05 +0000 (00:42 +1300)
python/subunit/run.py
python/subunit/test_results.py
python/subunit/v2.py

index 479691d11e563ffe420861047184d475d0b93651..2e07d8690a2985fe2a8e95136ed4ae3a8d9dae34 100755 (executable)
@@ -20,6 +20,7 @@
   $ python -m subunit.run mylib.tests.test_suite
 """
 
+import os
 import sys
 
 from testtools import ExtendedToStreamDecorator
@@ -84,6 +85,9 @@ class SubunitTestProgram(TestProgram):
 
 
 if __name__ == '__main__':
+    # Disable the default buffering, for Python 2.x where pdb doesn't do it
+    # on non-ttys.
+    sys.stdout = os.fdopen(sys.stdout.fileno(), 'ab', 0)
     stream = get_default_formatter()
     runner = SubunitTestRunner
     SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
index 7d609629b79dea1acaafb0df25b19b6553091fee..c9c768185fabe8aa10bdcab7fc227d32b3d3ac56 100644 (file)
@@ -691,3 +691,4 @@ class CatFiles(StreamResult):
         mime_type=None, route_code=None, timestamp=None):
         if file_name is not None:
             self.stream.write(file_bytes)
+            self.stream.flush()
index 89a2e725771885d8638e99bbac3996b31b418b14..34ab838f528258f6bd3d71a429dffc5acc5f20a0 100644 (file)
@@ -16,6 +16,7 @@
 
 import datetime
 from io import UnsupportedOperation
+import os
 import select
 import struct
 import zlib
@@ -24,6 +25,7 @@ import subunit
 import subunit.iso8601 as iso8601
 
 __all__ = [
+    'ByteStreamToStreamResult',
     'StreamResultToBytes',
     ]
 
@@ -222,15 +224,29 @@ class ByteStreamToStreamResult(object):
                         # Won't be able to select, fallback to
                         # one-byte-at-a-time.
                         break
-                    readable = select.select([self.source], [], [], 0.050)[0]
+                    # Note: this has a very low timeout because with stdin, the
+                    # BufferedIO layer typically has all the content available
+                    # from the stream when e.g. pdb is dropped into, leading to
+                    # select always timing out when in fact we could have read
+                    # (from the buffer layer) - we typically fail to aggregate
+                    # any content on 3.x Pythons.
+                    readable = select.select([self.source], [], [], 0.000001)[0]
                     if readable:
-                        buffered.append(self.source.read(1))
+                        content = self.source.read(1)
+                        if len(content) and content[0] != SIGNATURE[0]:
+                            buffered.append(content)
+                        else:
+                            # EOF or we hit a new packet.
+                            break
                     if not readable or len(buffered) >= 1048576:
                         break
                 result.status(
                     file_name=self.non_subunit_name,
                     file_bytes=b''.join(buffered))
-                continue
+                if not len(content) or content[0] != SIGNATURE[0]:
+                    continue
+                # Fall through to process the packet whose first byte is in
+                # content.
             try:
                 packet = [SIGNATURE]
                 self._parse(packet, result)