-rwxr-xr-x | ci/ciimage/arch/install.sh | 2
-rwxr-xr-x | ci/ciimage/eoan/install.sh | 1
-rwxr-xr-x | ci/ciimage/fedora/install.sh | 2
-rwxr-xr-x | ci/ciimage/opensuse/install.sh | 4
-rw-r--r-- | data/schema.xsd | 96
-rw-r--r-- | docs/markdown/snippets/junit_result_generation.md | 4
-rw-r--r-- | mesonbuild/mtest.py | 174
-rwxr-xr-x | run_unittests.py | 25
8 files changed, 275 insertions, 33 deletions
diff --git a/ci/ciimage/arch/install.sh b/ci/ciimage/arch/install.sh
index 7fe139e..6cbbb27 100755
--- a/ci/ciimage/arch/install.sh
+++ b/ci/ciimage/arch/install.sh
@@ -12,7 +12,7 @@ pkgs=(
   itstool gtk3 java-environment=8 gtk-doc llvm clang sdl2 graphviz doxygen vulkan-validation-layers
   openssh mercurial gtk-sharp-2 qt5-tools libwmf valgrind cmake netcdf-fortran openmpi
   nasm gnustep-base gettext
-  python-jsonschema
+  python-jsonschema python-lxml
   # cuda
 )
diff --git a/ci/ciimage/eoan/install.sh b/ci/ciimage/eoan/install.sh
index 4b3b746..7d7a1fd 100755
--- a/ci/ciimage/eoan/install.sh
+++ b/ci/ciimage/eoan/install.sh
@@ -11,6 +11,7 @@ export DC=gdc
 pkgs=(
   python3-pytest-xdist
   python3-pip libxml2-dev libxslt1-dev libyaml-dev libjson-glib-dev
+  python3-lxml
   wget unzip
   qt5-default clang
   pkg-config-arm-linux-gnueabihf
diff --git a/ci/ciimage/fedora/install.sh b/ci/ciimage/fedora/install.sh
index 242d677..f61d97e 100755
--- a/ci/ciimage/fedora/install.sh
+++ b/ci/ciimage/fedora/install.sh
@@ -13,7 +13,7 @@ pkgs=(
   doxygen vulkan-devel vulkan-validation-layers-devel openssh mercurial gtk-sharp2-devel libpcap-devel gpgme-devel
   qt5-qtbase-devel qt5-qttools-devel qt5-linguist qt5-qtbase-private-devel
   libwmf-devel valgrind cmake openmpi-devel nasm gnustep-base-devel gettext-devel ncurses-devel
-  libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
+  libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel python3-lxml
 )
 
 # Sys update
diff --git a/ci/ciimage/opensuse/install.sh b/ci/ciimage/opensuse/install.sh
index c5dd6df..7c90ec3 100755
--- a/ci/ciimage/opensuse/install.sh
+++ b/ci/ciimage/opensuse/install.sh
@@ -5,7 +5,7 @@ set -e
 source /ci/common.sh
 
 pkgs=(
-  python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3
+  python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3 python3-lxml
   ninja make git autoconf automake patch python3-Cython python3-jsonschema
   elfutils gcc gcc-c++ gcc-fortran gcc-objc gcc-obj-c++ vala rust bison flex curl
   mono-core gtkmm3-devel gtest gmock protobuf-devel wxGTK3-3_2-devel gobject-introspection-devel
@@ -17,7 +17,7 @@ pkgs=(
   libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
   boost-devel libboost_date_time-devel libboost_filesystem-devel libboost_locale-devel libboost_system-devel
   libboost_test-devel libboost_log-devel libboost_regex-devel
-  libboost_python-devel libboost_python-py3-1_71_0-devel libboost_regex-devel
+  libboost_python-py3-1_71_0-devel libboost_regex-devel
 )
 
 # Sys update
diff --git a/data/schema.xsd b/data/schema.xsd
new file mode 100644
index 0000000..58c6bfd
--- /dev/null
+++ b/data/schema.xsd
@@ -0,0 +1,96 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!-- from https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd -->
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
+
+  <xs:element name="failure">
+    <xs:complexType mixed="true">
+      <xs:attribute name="type" type="xs:string" use="optional"/>
+      <xs:attribute name="message" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="error">
+    <xs:complexType mixed="true">
+      <xs:attribute name="type" type="xs:string" use="optional"/>
+      <xs:attribute name="message" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="properties">
+    <xs:complexType>
+      <xs:sequence>
+        <xs:element ref="property" maxOccurs="unbounded"/>
+      </xs:sequence>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="property">
+    <xs:complexType>
+      <xs:attribute name="name" type="xs:string" use="required"/>
+      <xs:attribute name="value" type="xs:string" use="required"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="skipped">
+    <xs:complexType mixed="true">
+      <xs:attribute name="message" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="system-err" type="xs:string"/>
+  <xs:element name="system-out" type="xs:string"/>
+
+  <xs:element name="testcase">
+    <xs:complexType>
+      <xs:sequence>
+        <xs:element ref="skipped" minOccurs="0" maxOccurs="1"/>
+        <xs:element ref="error" minOccurs="0" maxOccurs="unbounded"/>
+        <xs:element ref="failure" minOccurs="0" maxOccurs="unbounded"/>
+        <xs:element ref="system-out" minOccurs="0" maxOccurs="unbounded"/>
+        <xs:element ref="system-err" minOccurs="0" maxOccurs="unbounded"/>
+      </xs:sequence>
+      <xs:attribute name="name" type="xs:string" use="required"/>
+      <xs:attribute name="assertions" type="xs:string" use="optional"/>
+      <xs:attribute name="time" type="xs:string" use="optional"/>
+      <xs:attribute name="classname" type="xs:string" use="optional"/>
+      <xs:attribute name="status" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="testsuite">
+    <xs:complexType>
+      <xs:sequence>
+        <xs:element ref="properties" minOccurs="0" maxOccurs="1"/>
+        <xs:element ref="testcase" minOccurs="0" maxOccurs="unbounded"/>
+        <xs:element ref="system-out" minOccurs="0" maxOccurs="1"/>
+        <xs:element ref="system-err" minOccurs="0" maxOccurs="1"/>
+      </xs:sequence>
+      <xs:attribute name="name" type="xs:string" use="required"/>
+      <xs:attribute name="tests" type="xs:string" use="required"/>
+      <xs:attribute name="failures" type="xs:string" use="optional"/>
+      <xs:attribute name="errors" type="xs:string" use="optional"/>
+      <xs:attribute name="time" type="xs:string" use="optional"/>
+      <xs:attribute name="disabled" type="xs:string" use="optional"/>
+      <xs:attribute name="skipped" type="xs:string" use="optional"/>
+      <xs:attribute name="timestamp" type="xs:string" use="optional"/>
+      <xs:attribute name="hostname" type="xs:string" use="optional"/>
+      <xs:attribute name="id" type="xs:string" use="optional"/>
+      <xs:attribute name="package" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+  <xs:element name="testsuites">
+    <xs:complexType>
+      <xs:sequence>
+        <xs:element ref="testsuite" minOccurs="0" maxOccurs="unbounded"/>
+      </xs:sequence>
+      <xs:attribute name="name" type="xs:string" use="optional"/>
+      <xs:attribute name="time" type="xs:string" use="optional"/>
+      <xs:attribute name="tests" type="xs:string" use="optional"/>
+      <xs:attribute name="failures" type="xs:string" use="optional"/>
+      <xs:attribute name="disabled" type="xs:string" use="optional"/>
+      <xs:attribute name="errors" type="xs:string" use="optional"/>
+    </xs:complexType>
+  </xs:element>
+
+</xs:schema>
diff --git a/docs/markdown/snippets/junit_result_generation.md b/docs/markdown/snippets/junit_result_generation.md
new file mode 100644
index 0000000..fbe910b
--- /dev/null
+++ b/docs/markdown/snippets/junit_result_generation.md
@@ -0,0 +1,4 @@
+## Meson test now produces JUnit XML from results
+
+Meson will now generate a JUnit-compatible XML file from test results. It
+will be in the meson-logs directory and is called testlog.junit.xml.
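A minimal sketch of the kind of document the schema above accepts, built with xml.etree.ElementTree, the same module the mtest.py change below uses. The suite and test names here are hypothetical; the count attributes must agree with the child testcase elements:

# Sketch: build a minimal JUnit file that data/schema.xsd accepts.
# 'myproject' and the testcase names are hypothetical examples.
import xml.etree.ElementTree as et

root = et.Element('testsuites', tests='2', errors='0', failures='1')
suite = et.SubElement(root, 'testsuite', name='myproject', tests='2',
                      errors='0', failures='1', skipped='0')
et.SubElement(suite, 'testcase', name='test_ok', classname='test_ok')
bad = et.SubElement(suite, 'testcase', name='test_bad', classname='test_bad')
et.SubElement(bad, 'failure').text = 'assertion failed'

et.ElementTree(root).write('testlog.junit.xml', encoding='utf-8',
                           xml_declaration=True)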
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 23643c5..3239736 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -33,8 +33,10 @@ import signal
 import subprocess
 import sys
 import tempfile
+import textwrap
 import time
 import typing as T
+import xml.etree.ElementTree as et
 
 from . import build
 from . import environment
@@ -320,6 +322,110 @@ class TAPParser:
                 yield self.Error('Too many tests run (expected {}, got {})'.format(plan.count, num_tests))
 
 
+class JunitBuilder:
+
+    """Builder for Junit test results.
+
+    Junit is impossible to stream out, as it requires attributes counting the
+    total number of tests, failures, skips, and errors in the root element
+    and in each test suite. As such, we use a builder class to track each
+    test case, and calculate all metadata before writing it out.
+
+    For tests with multiple results (like from a TAP test), we record the
+    test as a suite with the name project_name.test_name. This allows us to
+    track each result separately. For tests with only one result (such as
+    exit-code tests) we record each one into a suite with the name
+    project_name. The use of the project_name allows us to sort subproject
+    tests separately from the root project.
+    """
+
+    def __init__(self, filename: str) -> None:
+        self.filename = filename
+        self.root = et.Element(
+            'testsuites', tests='0', errors='0', failures='0')
+        self.suites = {}  # type: T.Dict[str, et.Element]
+
+    def log(self, name: str, test: 'TestRun') -> None:
+        """Log a single test case."""
+        # In this case we have a test binary with multiple results.
+        # We want to record this so that each result is recorded
+        # separately.
+        if test.results:
+            suitename = '{}.{}'.format(test.project, name)
+            assert suitename not in self.suites, 'duplicate suite'
+
+            suite = self.suites[suitename] = et.Element(
+                'testsuite',
+                name=suitename,
+                tests=str(len(test.results)),
+                errors=str(sum(1 for r in test.results if r is TestResult.ERROR)),
+                failures=str(sum(1 for r in test.results if r in
+                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
+                skipped=str(sum(1 for r in test.results if r is TestResult.SKIP)),
+            )
+
+            for i, result in enumerate(test.results):
+                # Both name and classname are required. Set them both to the
+                # number of the test in a TAP test, as TAP doesn't give names.
+                testcase = et.SubElement(suite, 'testcase', name=str(i), classname=str(i))
+                if result is TestResult.SKIP:
+                    et.SubElement(testcase, 'skipped')
+                elif result is TestResult.ERROR:
+                    et.SubElement(testcase, 'error')
+                elif result is TestResult.FAIL:
+                    et.SubElement(testcase, 'failure')
+                elif result is TestResult.UNEXPECTEDPASS:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test unexpectedly passed.'
+                elif result is TestResult.TIMEOUT:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test did not finish before configured timeout.'
+            if test.stdo:
+                out = et.SubElement(suite, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(suite, 'system-err')
+                err.text = test.stde.rstrip()
+        else:
+            if test.project not in self.suites:
+                suite = self.suites[test.project] = et.Element(
+                    'testsuite', name=test.project, tests='1', errors='0',
+                    failures='0', skipped='0')
+            else:
+                suite = self.suites[test.project]
+                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)
+
+            testcase = et.SubElement(suite, 'testcase', name=name, classname=name)
+            if test.res is TestResult.SKIP:
+                et.SubElement(testcase, 'skipped')
+                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
+            elif test.res is TestResult.ERROR:
+                et.SubElement(testcase, 'error')
+                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
+            elif test.res is TestResult.FAIL:
+                et.SubElement(testcase, 'failure')
+                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
+            if test.stdo:
+                out = et.SubElement(testcase, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(testcase, 'system-err')
+                err.text = test.stde.rstrip()
+
+    def write(self) -> None:
+        """Calculate total test counts and write out the xml result."""
+        for suite in self.suites.values():
+            self.root.append(suite)
+            # "skipped" is not allowed in the "testsuites" element
+            for attr in ['tests', 'errors', 'failures']:
+                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
+
+        tree = et.ElementTree(self.root)
+        with open(self.filename, 'wb') as f:
+            tree.write(f, encoding='utf-8', xml_declaration=True)
+
+
 class TestRun:
 
     @classmethod
@@ -335,30 +441,29 @@ class TestRun:
             res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
         else:
             res = TestResult.FAIL if bool(returncode) else TestResult.OK
-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd)
 
     @classmethod
     def make_tap(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                  returncode: int, starttime: float, duration: float,
                  stdo: str, stde: str,
                  cmd: T.Optional[T.List[str]]) -> 'TestRun':
-        res = None
-        num_tests = 0
+        res = None     # T.Optional[TestResult]
+        results = []   # T.List[TestResult]
         failed = False
-        num_skipped = 0
 
         for i in TAPParser(io.StringIO(stdo)).parse():
             if isinstance(i, TAPParser.Bailout):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
+                failed = True
             elif isinstance(i, TAPParser.Test):
-                if i.result == TestResult.SKIP:
-                    num_skipped += 1
-                elif i.result in (TestResult.FAIL, TestResult.UNEXPECTEDPASS):
+                results.append(i.result)
+                if i.result not in {TestResult.OK, TestResult.EXPECTEDFAIL}:
                     failed = True
-                num_tests += 1
             elif isinstance(i, TAPParser.Error):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
                 stde += '\nTAP parsing error: ' + i.message
+                failed = True
 
         if returncode != 0:
             res = TestResult.ERROR
@@ -366,7 +471,7 @@
 
         if res is None:
             # Now determine the overall result of the test based on the outcome of the subcases
-            if num_skipped == num_tests:
+            if all(t is TestResult.SKIP for t in results):
                 # This includes the case where num_tests is zero
                 res = TestResult.SKIP
             elif test.should_fail:
@@ -374,14 +479,16 @@
             else:
                 res = TestResult.FAIL if failed else TestResult.OK
 
-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, results, returncode, starttime,
+                   duration, stdo, stde, cmd)
 
     def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
-                 res: TestResult, returncode: int, starttime: float, duration: float,
+                 res: TestResult, results: T.List[TestResult], returncode:
+                 int, starttime: float, duration: float,
                  stdo: T.Optional[str], stde: T.Optional[str],
                  cmd: T.Optional[T.List[str]]):
         assert isinstance(res, TestResult)
         self.res = res
+        self.results = results  # May be an empty list
         self.returncode = returncode
         self.starttime = starttime
         self.duration = duration
@@ -390,6 +497,7 @@ class TestRun:
         self.cmd = cmd
         self.env = test_env
         self.should_fail = test.should_fail
+        self.project = test.project_name
 
     def get_log(self) -> str:
         res = '--- command ---\n'
@@ -490,7 +598,7 @@ class SingleTestRunner:
         cmd = self._get_cmd()
         if cmd is None:
             skip_stdout = 'Not run because can not execute cross compiled binaries.'
-            return TestRun(self.test, self.test_env, TestResult.SKIP, GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
+            return TestRun(self.test, self.test_env, TestResult.SKIP, [], GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
         else:
             wrap = TestHarness.get_wrapper(self.options)
             if self.options.gdb:
@@ -633,7 +741,7 @@ class SingleTestRunner:
                 stdo = ""
                 stde = additional_error
             if timed_out:
-                return TestRun(self.test, self.test_env, TestResult.TIMEOUT, p.returncode, starttime, duration, stdo, stde, cmd)
+                return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)
             else:
                 if self.test.protocol == 'exitcode':
                     return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
@@ -655,9 +763,11 @@ class TestHarness:
         self.timeout_count = 0
         self.is_run = False
         self.tests = None
+        self.results = []         # type: T.List[TestRun]
         self.logfilename = None   # type: T.Optional[str]
         self.logfile = None       # type: T.Optional[T.TextIO]
         self.jsonlogfile = None   # type: T.Optional[T.TextIO]
+        self.junit = None         # type: T.Optional[JunitBuilder]
         if self.options.benchmark:
             self.tests = load_benchmarks(options.wd)
         else:
@@ -678,12 +788,11 @@ class TestHarness:
         self.close_logfiles()
 
     def close_logfiles(self) -> None:
-        if self.logfile:
-            self.logfile.close()
-            self.logfile = None
-        if self.jsonlogfile:
-            self.jsonlogfile.close()
-            self.jsonlogfile = None
+        for f in ['logfile', 'jsonlogfile']:
+            lfile = getattr(self, f)
+            if lfile:
+                lfile.close()
+                setattr(self, f, None)
 
     def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> T.Dict[str, str]:
         if ':' in options.setup:
@@ -773,20 +882,24 @@ class TestHarness:
             self.logfile.write(result_str)
         if self.jsonlogfile:
             write_json_log(self.jsonlogfile, name, result)
+        if self.junit:
+            self.junit.log(name, result)
 
     def print_summary(self) -> None:
-        msg = '''
-Ok:                 {:<4}
-Expected Fail:      {:<4}
-Fail:               {:<4}
-Unexpected Pass:    {:<4}
-Skipped:            {:<4}
-Timeout:            {:<4}
-'''.format(self.success_count, self.expectedfail_count, self.fail_count,
+        msg = textwrap.dedent('''
+            Ok:                 {:<4}
+            Expected Fail:      {:<4}
+            Fail:               {:<4}
+            Unexpected Pass:    {:<4}
+            Skipped:            {:<4}
+            Timeout:            {:<4}
+            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
                    self.unexpectedpass_count, self.skip_count, self.timeout_count)
         print(msg)
         if self.logfile:
             self.logfile.write(msg)
+        if self.junit:
+            self.junit.write()
 
     def print_collected_logs(self) -> None:
         if len(self.collected_logs) > 0:
@@ -903,6 +1016,9 @@ Timeout:            {:<4}
 
         if namebase:
             logfile_base += '-' + namebase.replace(' ', '_')
+
+        self.junit = JunitBuilder(logfile_base + '.junit.xml')
+
         self.logfilename = logfile_base + '.txt'
         self.jsonlogfilename = logfile_base + '.json'
diff --git a/run_unittests.py b/run_unittests.py
index 831e53f..da898a3 100755
--- a/run_unittests.py
+++ b/run_unittests.py
@@ -4617,6 +4617,31 @@ recommended as it is not supported on some platforms''')
         out = self.build()
         self.assertNotIn('Project configured', out)
 
+    def _test_junit(self, case: str) -> None:
+        try:
+            import lxml.etree as et
+        except ImportError:
+            raise unittest.SkipTest('lxml required, but not found.')
+
+        schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
+
+        testdir = os.path.join(self.common_test_dir, case)
+        self.init(testdir)
+        self.run_tests()
+
+        junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
+        try:
+            schema.assertValid(junit)
+        except et.DocumentInvalid as e:
+            self.fail(e.error_log)
+
+    def test_junit_valid_tap(self):
+        self._test_junit('213 tap tests')
+
+    def test_junit_valid_exitcode(self):
+        self._test_junit('44 test args')
+
+
 class FailureTests(BasePlatformTests):
     '''
     Tests that test failure conditions. Build files here should be dynamically
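The generated file can be checked end to end the same way the _test_junit helper above does it. A minimal sketch, assuming a build directory named builddir in which meson test has already run (the paths are assumptions, not part of the patch):

# Sketch: validate meson-logs/testlog.junit.xml against data/schema.xsd,
# mirroring the check performed by _test_junit. Paths are assumptions.
import lxml.etree as et

schema = et.XMLSchema(et.parse('data/schema.xsd'))
junit = et.parse('builddir/meson-logs/testlog.junit.xml')

if schema.validate(junit):      # returns True when the document conforms
    print('testlog.junit.xml is valid JUnit XML')
else:
    print(schema.error_log)     # lists each validation error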