-rwxr-xr-x  tools/binman/main.py        8
-rwxr-xr-x  tools/buildman/main.py      8
-rwxr-xr-x  tools/dtoc/main.py          9
-rwxr-xr-x  tools/dtoc/test_fdt.py      8
-rwxr-xr-x  tools/patman/main.py        8
-rw-r--r--  tools/patman/test_util.py  58
6 files changed, 38 insertions, 61 deletions
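
The change below replaces the pattern where each caller allocates a unittest.TestResult, passes it into run_test_suites() and then hands it to report_result(), with one where run_test_suites() runs the suite itself and returns the result. A minimal caller-side sketch of the two conventions (argument values are illustrative; 'suite_classes' stands in for each tool's test-class list):

    # Before: the caller owns the result object and reports it separately.
    result = unittest.TestResult()
    test_util.run_test_suites(
        result, debug, verbosity, test_preserve_dirs, processes, test_name,
        toolpath, suite_classes)
    return test_util.report_result('binman', test_name, result)

    # After: run_test_suites() runs everything and returns the TestResult;
    # the caller only turns it into an exit code.
    result = test_util.run_test_suites(
        'binman', debug, verbosity, test_preserve_dirs, processes, test_name,
        toolpath, suite_classes)
    return 0 if result.wasSuccessful() else 1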
diff --git a/tools/binman/main.py b/tools/binman/main.py
index 5fb9404..14432a8 100755
--- a/tools/binman/main.py
+++ b/tools/binman/main.py
@@ -13,7 +13,6 @@ import os
import site
import sys
import traceback
-import unittest
# Get the absolute path to this file at run-time
our_path = os.path.dirname(os.path.realpath(__file__))
@@ -73,19 +72,18 @@ def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
from binman import image_test
import doctest
- result = unittest.TestResult()
test_name = args and args[0] or None
# Run the entry tests first, since these need to be the first to import the
# 'entry' module.
- test_util.run_test_suites(
- result, debug, verbosity, test_preserve_dirs, processes, test_name,
+ result = test_util.run_test_suites(
+ 'binman', debug, verbosity, test_preserve_dirs, processes, test_name,
toolpath,
[bintool_test.TestBintool, entry_test.TestEntry, ftest.TestFunctional,
fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
cbfs_util_test.TestCbfs, fip_util_test.TestFip])
- return test_util.report_result('binman', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
def RunTestCoverage(toolpath):
"""Run the tests and check that we get 100% coverage"""
diff --git a/tools/buildman/main.py b/tools/buildman/main.py
index 3b6af24..67c560c 100755
--- a/tools/buildman/main.py
+++ b/tools/buildman/main.py
@@ -11,7 +11,6 @@ import multiprocessing
import os
import re
import sys
-import unittest
# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))
@@ -34,19 +33,18 @@ def RunTests(skip_net_tests, verboose, args):
from buildman import test
import doctest
- result = unittest.TestResult()
test_name = args and args[0] or None
if skip_net_tests:
test.use_network = False
# Run the entry tests first, since these need to be the first to import the
# 'entry' module.
- test_util.run_test_suites(
- result, False, verboose, False, None, test_name, [],
+ result = test_util.run_test_suites(
+ 'buildman', False, verboose, False, None, test_name, [],
[test.TestBuild, func_test.TestFunctional,
'buildman.toolchain', 'patman.gitutil'])
- return test_util.report_result('buildman', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
options, args = cmdline.ParseArgs()
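
buildman's list mixes TestCase classes with plain module names ('buildman.toolchain', 'patman.gitutil'); as the test_util.py hunk below shows, such strings are now converted into doctest suites and added to the main unittest suite rather than run separately beforehand. A small sketch of that mechanism ('mypkg.mymod' is a placeholder for any importable module whose docstrings contain doctest examples):

    import doctest
    import unittest

    suite = unittest.TestSuite()
    # Every '>>>' example in the module's docstrings becomes one test case.
    suite.addTests(doctest.DocTestSuite('mypkg.mymod'))
    unittest.TextTestRunner(verbosity=2).run(suite)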
diff --git a/tools/dtoc/main.py b/tools/dtoc/main.py
index fac9db9..5508759 100755
--- a/tools/dtoc/main.py
+++ b/tools/dtoc/main.py
@@ -24,7 +24,6 @@ see doc/driver-model/of-plat.rst
from argparse import ArgumentParser
import os
import sys
-import unittest
# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))
@@ -49,18 +48,18 @@ def run_tests(processes, args):
from dtoc import test_src_scan
from dtoc import test_dtoc
- result = unittest.TestResult()
sys.argv = [sys.argv[0]]
test_name = args.files and args.files[0] or None
test_dtoc.setup()
- test_util.run_test_suites(
- result, debug=True, verbosity=1, test_preserve_dirs=False,
+ result = test_util.run_test_suites(
+ toolname='dtoc', debug=True, verbosity=1, test_preserve_dirs=False,
processes=processes, test_name=test_name, toolpath=[],
class_and_module_list=[test_dtoc.TestDtoc, test_src_scan.TestSrcScan])
- return test_util.report_result('binman', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
+
def RunTestCoverage():
"""Run the tests and check that we get 100% coverage"""
diff --git a/tools/dtoc/test_fdt.py b/tools/dtoc/test_fdt.py
index 3859af8..3baf443 100755
--- a/tools/dtoc/test_fdt.py
+++ b/tools/dtoc/test_fdt.py
@@ -784,13 +784,13 @@ def RunTests(args):
Returns:
Return code, 0 on success
"""
- result = unittest.TestResult()
test_name = args and args[0] or None
- test_util.run_test_suites(
- result, False, False, False, None, test_name, None,
+ result = test_util.run_test_suites(
+ 'test_fdt', False, False, False, None, test_name, None,
[TestFdt, TestNode, TestProp, TestFdtUtil])
- return test_util.report_result('fdt', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
+
if __name__ != '__main__':
sys.exit(1)
diff --git a/tools/patman/main.py b/tools/patman/main.py
index 2a2ac45..66d4806 100755
--- a/tools/patman/main.py
+++ b/tools/patman/main.py
@@ -12,7 +12,6 @@ import re
import shutil
import sys
import traceback
-import unittest
if __name__ == "__main__":
# Allow 'from patman import xxx' to work
@@ -134,13 +133,12 @@ if args.cmd == 'test':
import doctest
from patman import func_test
- result = unittest.TestResult()
- test_util.run_test_suites(
- result, False, False, False, None, None, None,
+ result = test_util.run_test_suites(
+ 'patman', False, False, False, None, None, None,
[test_checkpatch.TestPatch, func_test.TestFunctional,
'gitutil', 'settings', 'terminal'])
- sys.exit(test_util.report_result('patman', args.testname, result))
+ sys.exit(0 if result.wasSuccessful() else 1)
# Process commits, produce patches files, check them, email them
elif args.cmd == 'send':
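
patman exits straight from the 'test' handler, so the suite's outcome becomes the process exit status, which is what scripts and CI key off. A minimal sketch of observing that status from outside (the path to the patman script is an assumption):

    import subprocess

    # Run the tool's self-tests in a child process; path is illustrative.
    proc = subprocess.run(['tools/patman/patman', 'test'])
    print('tests passed' if proc.returncode == 0 else 'tests FAILED')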
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index a4c2a2c..ba8f87f 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -102,36 +102,12 @@ def capture_sys_output():
sys.stdout, sys.stderr = old_out, old_err
-def report_result(toolname:str, test_name: str, result: unittest.TestResult):
- """Report the results from a suite of tests
-
- Args:
- toolname: Name of the tool that ran the tests
- test_name: Name of test that was run, or None for all
- result: A unittest.TestResult object containing the results
- """
- print(result)
- for test, err in result.errors:
- print(test.id(), err)
- for test, err in result.failures:
- print(test.id(), err)
- if result.skipped:
- print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
- 's' if len(result.skipped) > 1 else ''))
- for skip_info in result.skipped:
- print('%s: %s' % (skip_info[0], skip_info[1]))
- if result.errors or result.failures:
- print('%s tests FAILED' % toolname)
- return 1
- return 0
-
-
-def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
+def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
test_name, toolpath, class_and_module_list):
"""Run a series of test suites and collect the results
Args:
- result: A unittest.TestResult object to add the results to
+ toolname: Name of the tool that ran the tests
debug: True to enable debugging, which shows a full stack trace on error
verbosity: Verbosity level to use (0-4)
test_preserve_dirs: True to preserve the input directory used by tests
@@ -145,11 +121,6 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
class_and_module_list: List of test classes (type class) and module
names (type str) to run
"""
- for module in class_and_module_list:
- if isinstance(module, str) and (not test_name or test_name == module):
- suite = doctest.DocTestSuite(module)
- suite.run(result)
-
sys.argv = [sys.argv[0]]
if debug:
sys.argv.append('-D')
@@ -161,6 +132,19 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
suite = unittest.TestSuite()
loader = unittest.TestLoader()
+ runner = unittest.TextTestRunner(
+ stream=sys.stdout,
+ verbosity=(1 if verbosity is None else verbosity),
+ )
+
+ if use_concurrent and processes != 1:
+ suite = ConcurrentTestSuite(suite,
+ fork_for_tests(processes or multiprocessing.cpu_count()))
+
+ for module in class_and_module_list:
+ if isinstance(module, str) and (not test_name or test_name == module):
+ suite.addTests(doctest.DocTestSuite(module))
+
for module in class_and_module_list:
if isinstance(module, str):
continue
@@ -179,9 +163,9 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
suite.addTests(loader.loadTestsFromName(test_name, module))
else:
suite.addTests(loader.loadTestsFromTestCase(module))
- if use_concurrent and processes != 1:
- concurrent_suite = ConcurrentTestSuite(suite,
- fork_for_tests(processes or multiprocessing.cpu_count()))
- concurrent_suite.run(result)
- else:
- suite.run(result)
+
+ print(f" Running {toolname} tests ".center(70, "="))
+ result = runner.run(suite)
+ print()
+
+ return result
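
The reworked run_test_suites() centralises running and reporting: a unittest.TextTestRunner prints the per-test output and summary that report_result() previously assembled by hand, and when the optional concurrencytest module is available the suite is wrapped so that tests fork across CPUs. A trimmed, self-contained sketch of the same pattern (concurrencytest is a third-party package; without it the suite runs serially):

    import multiprocessing
    import sys
    import unittest

    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        use_concurrent = True
    except ImportError:
        use_concurrent = False

    def run_suite(toolname, classes, processes=None, verbosity=1):
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        if use_concurrent and processes != 1:
            # Fork one worker per CPU (or 'processes' workers); the wrapper
            # is itself a TestSuite, so tests can still be added afterwards.
            suite = ConcurrentTestSuite(
                suite, fork_for_tests(processes or multiprocessing.cpu_count()))
        for cls in classes:
            suite.addTests(loader.loadTestsFromTestCase(cls))
        runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=verbosity)
        print(f' Running {toolname} tests '.center(70, '='))
        return runner.run(suite)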