path: root/tools/patman/test_util.py
authorAlper Nebi Yasak <alpernebiyasak@gmail.com>2022-04-02 20:06:06 +0300
committerSimon Glass <sjg@chromium.org>2022-06-28 03:09:51 +0100
commitd8318feba1ef3b2a74495ea7dca33ad1276a4ffe (patch)
treeb9bcd3f8e6cd91a6f7c1ab3ea84047a4cfe1ebf1 /tools/patman/test_util.py
parentce12c47b92152e9457d3daa3ddbf53c1cc3de0bb (diff)
patman: test_util: Use unittest text runner to print test results
The python tools' test utilities handle printing test results, but the
output is quite bare compared to an ordinary unittest run. Delegate
printing the results to a unittest text runner, which gives us niceties
like clear separation between each test's result and how long it took
to run the test suite.

Unfortunately it does not print info for skipped tests by default, but
this can be handled later by a custom test result subclass. It also
does not print the tool name; manually print a heading that includes
the toolname so that the outputs of each tool's tests are
distinguishable in the CI output.

Signed-off-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
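
As a rough sketch of the follow-up idea mentioned above (not part of this
patch), skipped tests could be surfaced by pointing the text runner at a
custom result class; the SkipPrintingResult name below is hypothetical:

    import sys
    import unittest

    class SkipPrintingResult(unittest.TextTestResult):
        """Hypothetical result class that also reports skipped tests."""
        def addSkip(self, test, reason):
            super().addSkip(test, reason)
            # Write the skip immediately so it shows up in CI output
            self.stream.writeln(f'SKIPPED {test.id()}: {reason}')

    runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=1,
                                     resultclass=SkipPrintingResult)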
Diffstat (limited to 'tools/patman/test_util.py')
-rw-r--r--  tools/patman/test_util.py  58
1 file changed, 21 insertions, 37 deletions
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index a4c2a2c..ba8f87f 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -102,36 +102,12 @@ def capture_sys_output():
        sys.stdout, sys.stderr = old_out, old_err


-def report_result(toolname:str, test_name: str, result: unittest.TestResult):
-    """Report the results from a suite of tests
-
-    Args:
-        toolname: Name of the tool that ran the tests
-        test_name: Name of test that was run, or None for all
-        result: A unittest.TestResult object containing the results
-    """
-    print(result)
-    for test, err in result.errors:
-        print(test.id(), err)
-    for test, err in result.failures:
-        print(test.id(), err)
-    if result.skipped:
-        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
-                                         's' if len(result.skipped) > 1 else ''))
-        for skip_info in result.skipped:
-            print('%s: %s' % (skip_info[0], skip_info[1]))
-    if result.errors or result.failures:
-        print('%s tests FAILED' % toolname)
-        return 1
-    return 0
-
-
-def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
+def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
-        result: A unittest.TestResult object to add the results to
+        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
@@ -145,11 +121,6 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
-    for module in class_and_module_list:
-        if isinstance(module, str) and (not test_name or test_name == module):
-            suite = doctest.DocTestSuite(module)
-            suite.run(result)
-
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
@@ -161,6 +132,19 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
+    runner = unittest.TextTestRunner(
+        stream=sys.stdout,
+        verbosity=(1 if verbosity is None else verbosity),
+    )
+
+    if use_concurrent and processes != 1:
+        suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+
+    for module in class_and_module_list:
+        if isinstance(module, str) and (not test_name or test_name == module):
+            suite.addTests(doctest.DocTestSuite(module))
+
    for module in class_and_module_list:
        if isinstance(module, str):
            continue
@@ -179,9 +163,9 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
            suite.addTests(loader.loadTestsFromName(test_name, module))
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
-    if use_concurrent and processes != 1:
-        concurrent_suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
-        concurrent_suite.run(result)
-    else:
-        suite.run(result)
+
+    print(f" Running {toolname} tests ".center(70, "="))
+    result = runner.run(suite)
+    print()
+
+    return result
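
With report_result() removed and the unittest result object returned to the
caller, each tool is expected to derive its own exit status from it. A
minimal sketch of such a call site, assuming hypothetical argument values
(the real binman/buildman/patman callers are outside this diff):

    from patman import test_util

    def run_tests(args):
        # Hypothetical caller; the argument values are placeholders
        result = test_util.run_test_suites(
            'patman', debug=False, verbosity=args.verbose,
            test_preserve_dirs=False, processes=None,
            test_name=args.testname, toolpath=[],
            class_and_module_list=['gitutil', 'settings'])

        # The TextTestRunner has already printed per-test output and a
        # summary; convert the result into a conventional exit code.
        return 0 if result.wasSuccessful() else 1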