author     Alper Nebi Yasak <alpernebiyasak@gmail.com>  2022-04-02 20:06:08 +0300
committer  Simon Glass <sjg@chromium.org>               2022-06-28 03:09:51 +0100
commit     ebcaafcded40da8ae6cb4234c2ba9901c7bee644 (patch)
tree       3e1adb9dd654f0bf2107765f25da91f127408f90 /tools/patman
parent     dd6b92b0b9532bb4ba0ad8ac3620b1f3b81adf5b (diff)
patman: test_util: Print test stdout/stderr within test summaries
While running tests for a Python tool, the tests' outputs get printed in whatever order they happen to run, without any indication as to which output belongs to which test. Unittest supports capturing these outputs and printing them as part of the test summaries, but when a failure or error occurs it switches back to printing as the tests run. Testtools and subunit tests can do the same as their parts inherit from unittest, but they don't outright expose this functionality.

On the unittest side, enable output buffering for the custom test result class. Try to avoid ugly outputs by not printing stdout/stderr before the test summary for low verbosity levels and for successful tests.

On the subunit side, implement a custom TestProtocolClient that enables the same underlying functionality and injects the captured streams as additional test details. This causes them to be merged into their test's error traceback message, which is later rebuilt into an exception and passed to our unittest report class.

Signed-off-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
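For context, here is a minimal standalone sketch of the unittest-side mechanism (illustrative only, not part of this patch): TextTestRunner's buffer option captures stdout/stderr around each test, and the semi-private _mirrorOutput flag on the result controls whether that captured output is echoed when the test finishes. The class and test names below are made up for the example.

import sys
import unittest


class QuietTextTestResult(unittest.TextTestResult):
    """Illustrative result class: echo captured output only when verbose."""

    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        self.verbosity = verbosity  # the base class does not keep this

    def addFailure(self, test, err):
        super().addFailure(test, err)
        # The base class sets _mirrorOutput for failures, which echoes the
        # captured stdout/stderr as soon as the test finishes; gate that on
        # verbosity. The output still reaches the final summary, because
        # unittest appends it to the stored traceback when buffering is on.
        self._mirrorOutput &= self.verbosity >= 3


class Demo(unittest.TestCase):
    def test_fail(self):
        print('captured; echoed inline only at verbosity >= 3')
        self.fail('boom')


if __name__ == '__main__':
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=2,
        buffer=True,  # capture stdout/stderr around each test
        resultclass=QuietTextTestResult,
    )
    runner.run(unittest.defaultTestLoader.loadTestsFromTestCase(Demo))

The patch below applies this same verbosity gate to errors, failures, subtests, skips and expected failures, and forces _mirrorOutput off for successes.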
Diffstat (limited to 'tools/patman')
-rw-r--r--  tools/patman/test_util.py  33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index 130d914..c27e0b3 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -15,6 +15,7 @@ from patman import command
from io import StringIO
+buffer_outputs = True
use_concurrent = True
try:
from concurrencytest.concurrencytest import ConcurrentTestSuite
@@ -120,6 +121,7 @@ class FullTextTestResult(unittest.TextTestResult):
0: Print nothing
1: Print a dot per test
2: Print test names
+ 3: Print test names, and buffered outputs for failing tests
"""
def __init__(self, stream, descriptions, verbosity):
self.verbosity = verbosity
@@ -139,12 +141,39 @@ class FullTextTestResult(unittest.TextTestResult):
self.printErrorList('XFAIL', self.expectedFailures)
self.printErrorList('XPASS', unexpected_successes)
+ def addError(self, test, err):
+ """Called when an error has occurred."""
+ super().addError(test, err)
+ self._mirrorOutput &= self.verbosity >= 3
+
+ def addFailure(self, test, err):
+ """Called when a test has failed."""
+ super().addFailure(test, err)
+ self._mirrorOutput &= self.verbosity >= 3
+
+ def addSubTest(self, test, subtest, err):
+ """Called at the end of a subtest."""
+ super().addSubTest(test, subtest, err)
+ self._mirrorOutput &= self.verbosity >= 3
+
+ def addSuccess(self, test):
+ """Called when a test has completed successfully"""
+ super().addSuccess(test)
+ # Don't print stdout/stderr for successful tests
+ self._mirrorOutput = False
+
def addSkip(self, test, reason):
"""Called when a test is skipped."""
# Add empty line to keep spacing consistent with other results
if not reason.endswith('\n'):
reason += '\n'
super().addSkip(test, reason)
+ self._mirrorOutput &= self.verbosity >= 3
+
+ def addExpectedFailure(self, test, err):
+ """Called when an expected failure/error occurred."""
+ super().addExpectedFailure(test, err)
+ self._mirrorOutput &= self.verbosity >= 3
def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
@@ -180,12 +209,14 @@ def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
runner = unittest.TextTestRunner(
stream=sys.stdout,
verbosity=(1 if verbosity is None else verbosity),
+ buffer=buffer_outputs,
resultclass=FullTextTestResult,
)
if use_concurrent and processes != 1:
suite = ConcurrentTestSuite(suite,
- fork_for_tests(processes or multiprocessing.cpu_count()))
+ fork_for_tests(processes or multiprocessing.cpu_count(),
+ buffer=buffer_outputs))
for module in class_and_module_list:
if isinstance(module, str) and (not test_name or test_name == module):
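The subunit-side half of this change lands outside tools/patman, so it does not appear in the diffstat above. As a rough illustration only (the class name and structure here are assumptions, not the patch's code), a subunit TestProtocolClient subclass can capture each test's streams and report them as extra details, which subunit folds back into the error message on the receiving side:

import sys
from io import StringIO

import subunit
from testtools.content import TracebackContent, text_content


class CapturingProtocolClient(subunit.TestProtocolClient):
    """Hypothetical client for the forked worker processes."""

    def startTest(self, test):
        super().startTest(test)
        # Swap the real streams for buffers for this test's duration;
        # the subunit protocol itself writes to a separate stream
        self._real_streams = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = StringIO(), StringIO()

    def stopTest(self, test):
        sys.stdout, sys.stderr = self._real_streams
        super().stopTest(test)

    def _details_with_output(self, test, error, details):
        details = dict(details or {})
        if error is not None:
            # Fold the exc_info tuple into the details dict so the
            # captured streams can travel along with the traceback
            details['traceback'] = TracebackContent(error, test)
        for name, stream in (('stdout', sys.stdout), ('stderr', sys.stderr)):
            text = stream.getvalue()
            if text:
                details[name] = text_content(text)
        return details

    def addError(self, test, error=None, details=None):
        super().addError(
            test, details=self._details_with_output(test, error, details))

    def addFailure(self, test, error=None, details=None):
        super().addFailure(
            test, details=self._details_with_output(test, error, details))

When the parent process replays the subunit stream, these details end up merged into the test's traceback message, which is rebuilt into an exception and handed to FullTextTestResult, as described in the commit message.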