author      Alper Nebi Yasak <alpernebiyasak@gmail.com>    2022-04-02 20:06:06 +0300
committer   Simon Glass <sjg@chromium.org>                 2022-06-28 03:09:51 +0100
commit      d8318feba1ef3b2a74495ea7dca33ad1276a4ffe (patch)
tree        b9bcd3f8e6cd91a6f7c1ab3ea84047a4cfe1ebf1 /tools/dtoc
parent      ce12c47b92152e9457d3daa3ddbf53c1cc3de0bb (diff)
patman: test_util: Use unittest text runner to print test results
The python tools' test utilities handle printing test results, but the output is quite bare compared to an ordinary unittest run. Delegate printing the results to a unittest text runner, which gives us niceties like clear separation between each test's result and how long it took to run the test suite.

Unfortunately it does not print info for skipped tests by default, but this can be handled later by a custom test result subclass. It also does not print the tool name; manually print a heading that includes the toolname so that the outputs of each tool's tests are distinguishable in the CI output.

Signed-off-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
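The patch below only updates the callers; the shared helper itself lives in tools/patman/test_util.py and is not shown here. As a rough, non-authoritative sketch of the delegation described above, a run_test_suites() helper could print the tool heading itself and hand the suite to unittest's text runner (the parameter list and body here are assumptions based on the commit message, not the actual test_util code):

# Illustrative sketch only -- names and signature are simplified assumptions,
# not the real tools/patman/test_util.run_test_suites() implementation.
import unittest

def run_test_suites(toolname, verbosity, class_and_module_list):
    """Run the given test classes and let unittest print the results.

    Args:
        toolname (str): Name printed as a heading so each tool's test
            output is distinguishable in the CI output
        verbosity (int): Verbosity level passed to the text runner
        class_and_module_list (list): unittest.TestCase classes to run

    Returns:
        unittest.TestResult: callers check wasSuccessful() on this
    """
    # The text runner does not print the tool name, so do it manually
    print(f'\nRunning {toolname} tests')

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for cls in class_and_module_list:
        suite.addTests(loader.loadTestsFromTestCase(cls))

    # The runner handles per-test output and reports how long the suite took
    runner = unittest.TextTestRunner(verbosity=verbosity)
    return runner.run(suite)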
Diffstat (limited to 'tools/dtoc')
-rwxr-xr-x  tools/dtoc/main.py      | 9
-rwxr-xr-x  tools/dtoc/test_fdt.py  | 8
2 files changed, 8 insertions, 9 deletions
diff --git a/tools/dtoc/main.py b/tools/dtoc/main.py
index fac9db9..5508759 100755
--- a/tools/dtoc/main.py
+++ b/tools/dtoc/main.py
@@ -24,7 +24,6 @@ see doc/driver-model/of-plat.rst
from argparse import ArgumentParser
import os
import sys
-import unittest
# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))
@@ -49,18 +48,18 @@ def run_tests(processes, args):
from dtoc import test_src_scan
from dtoc import test_dtoc
- result = unittest.TestResult()
sys.argv = [sys.argv[0]]
test_name = args.files and args.files[0] or None
test_dtoc.setup()
- test_util.run_test_suites(
- result, debug=True, verbosity=1, test_preserve_dirs=False,
+ result = test_util.run_test_suites(
+ toolname='dtoc', debug=True, verbosity=1, test_preserve_dirs=False,
processes=processes, test_name=test_name, toolpath=[],
class_and_module_list=[test_dtoc.TestDtoc,test_src_scan.TestSrcScan])
- return test_util.report_result('binman', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
+
def RunTestCoverage():
"""Run the tests and check that we get 100% coverage"""
diff --git a/tools/dtoc/test_fdt.py b/tools/dtoc/test_fdt.py
index 3859af8..3baf443 100755
--- a/tools/dtoc/test_fdt.py
+++ b/tools/dtoc/test_fdt.py
@@ -784,13 +784,13 @@ def RunTests(args):
Returns:
Return code, 0 on success
"""
- result = unittest.TestResult()
test_name = args and args[0] or None
- test_util.run_test_suites(
- result, False, False, False, None, test_name, None,
+ result = test_util.run_test_suites(
+ 'test_fdt', False, False, False, None, test_name, None,
[TestFdt, TestNode, TestProp, TestFdtUtil])
- return test_util.report_result('fdt', test_name, result)
+ return (0 if result.wasSuccessful() else 1)
+
if __name__ != '__main__':
sys.exit(1)
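The commit message defers reporting of skipped tests to a future custom test result subclass. As a hedged sketch of that follow-up idea, built only on standard unittest APIs (the class name and wiring are illustrative, not taken from the U-Boot tree), a TextTestResult subclass could announce each skip and be passed to the runner via resultclass:

# Illustrative sketch of the deferred idea from the commit message; the class
# name and exact output format are assumptions, not U-Boot code.
import unittest

class PrintSkipsResult(unittest.TextTestResult):
    """TextTestResult that always reports skipped tests and their reasons."""

    def addSkip(self, test, reason):
        super().addSkip(test, reason)
        # The stock result only spells out skips at higher verbosity; print
        # them unconditionally so the CI output shows what was not run
        self.stream.writeln(f'skipped {test.id()}: {reason}')

# Usage: hand the class to the text runner through its resultclass argument
runner = unittest.TextTestRunner(verbosity=1, resultclass=PrintSkipsResult)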