author     Jussi Pakkanen <jpakkane@gmail.com>       2020-02-17 00:37:53 +0200
committer  GitHub <noreply@github.com>               2020-02-17 00:37:53 +0200
commit     d8c3dc66d5370c880db3d4473f208f01154b250a (patch)
tree       bc8d5cbb1b85d81ce70a3cf40513fe801ad1e9e9
parent     b1304f729079fdc07523904902273ac581cfb702 (diff)
parent     eb1a8ecf7c2b10048f4715fb641cb6283297dde8 (diff)
Merge pull request #6621 from jon-turney/project_tests_cleanup
Various cleanups and documentation improvements for run_project_tests.py
-rw-r--r--   docs/markdown/Contributing.md                              37
-rwxr-xr-x   run_project_tests.py                                      102
-rw-r--r--   test cases/common/56 install script/no-installed-files     0
3 files changed, 90 insertions(+), 49 deletions(-)
diff --git a/docs/markdown/Contributing.md b/docs/markdown/Contributing.md
index f545b77..c5b8608 100644
--- a/docs/markdown/Contributing.md
+++ b/docs/markdown/Contributing.md
@@ -127,6 +127,8 @@ project tests. To run all tests, execute `./run_tests.py`. Unit tests
 can be run with `./run_unittests.py` and project tests with
 `./run_project_tests.py`.
 
+### Project tests
+
 Subsets of project tests can be selected with
 `./run_project_tests.py --only` option. This can save a great deal of
 time when only a certain part of Meson is being tested.
@@ -139,7 +141,7 @@ For example, all the CUDA project tests run and pass on Windows via
 `./run_project_tests.py --only cuda --backend ninja`
 
 Each project test is a standalone project that can be compiled on its
-own. They are all in `test cases` subdirectory. The simplest way to
+own. They are all in the `test cases` subdirectory. The simplest way to
 run a single project test is to do something like `./meson.py test\
 cases/common/1\ trivial builddir`. The one exception to this is `test
 cases/unit` directory discussed below.
@@ -153,13 +155,32 @@ should be implemented as a Python script. The goal of test projects is
 also to provide sample projects that end users can use as a base for
 their own projects.
 
-All project tests follow the same pattern: they are compiled, tests
-are run and finally install is run. Passing means that building and
-tests succeed and installed files match the `installed_files.txt` file
-in the test's source root. Any tests that require more thorough
-analysis, such as checking that certain compiler arguments can be
-found in the command line or that the generated pkg-config files
-actually work should be done with a unit test.
+All project tests follow the same pattern: they are configured, compiled, tests
+are run and finally install is run. Passing means that configuring, building and
+tests succeed and that installed files match those expected.
+
+Any tests that require more thorough analysis, such as checking that certain
+compiler arguments can be found in the command line or that the generated
+pkg-config files actually work should be done with a unit test.
+
+The following files in the test's source root are consulted, if they exist:
+
+* `installed_files.txt` lists the files which are expected to be installed.
+Various constructs containing `?` are used to indicate platform specific
+filename variations (e.g. `?so` represents the platform appropriate suffix for a
+shared library)
+
+* `setup_env.json` contains a dictionary which specifies additional
+environment variables to be set during the configure step of the test. `@ROOT@`
+is replaced with the absolute path of the source directory.
+
+* `crossfile.ini` and `nativefile.ini` are passed to the configure step with
+`--cross-file` and `--native-file` options, respectively.
+
+Additionally:
+
+* `mlog.cmd_ci_include()` can be called from anywhere inside meson to capture the
+contents of an additional file into the CI log on failure.
 
 Projects needed by unit tests are in the `test cases/unit`
 subdirectory. They are not run as part of `./run_project_tests.py`.
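For readers unfamiliar with these per-test files, here is a rough, self-contained sketch of how a runner could consume `setup_env.json` and `installed_files.txt` as documented above. The file contents, helper names, and the simplified `?so` handling are illustrative assumptions, not code from this commit; the real logic (including the full set of `?` constructs, handled by `platform_fix_name`) lives in `run_project_tests.py`.

```python
import json
from pathlib import Path

def load_test_env(srcdir: Path, base_env: dict) -> dict:
    """Apply setup_env.json: extra environment variables for the configure
    step, with '@ROOT@' expanded to the test's source directory."""
    env = base_env.copy()
    env_file = srcdir / 'setup_env.json'
    if env_file.is_file():
        for key, val in json.loads(env_file.read_text()).items():
            env[key] = val.replace('@ROOT@', str(srcdir.resolve()))
    return env

def expected_files(srcdir: Path, shared_suffix: str = 'so') -> list:
    """Read installed_files.txt: one expected install path per line, where
    '?so' stands for the platform-appropriate shared-library suffix.
    (Simplified: the real script supports more '?' constructs.)"""
    info_file = srcdir / 'installed_files.txt'
    if not info_file.is_file():
        return []
    return [line.strip().replace('?so', shared_suffix)
            for line in info_file.read_text().splitlines()
            if line.strip()]
```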
diff --git a/run_project_tests.py b/run_project_tests.py
index 9965bc3..65e1d0c 100755
--- a/run_project_tests.py
+++ b/run_project_tests.py
@@ -65,17 +65,30 @@ class BuildStep(Enum):
 
 
 class TestResult:
-    def __init__(self, msg, step, stdo, stde, mlog, cicmds, conftime=0, buildtime=0, testtime=0):
-        self.msg = msg
-        self.step = step
-        self.stdo = stdo
-        self.stde = stde
-        self.mlog = mlog
+    def __init__(self, cicmds):
+        self.msg = ''  # empty msg indicates test success
+        self.stdo = ''
+        self.stde = ''
+        self.mlog = ''
         self.cicmds = cicmds
-        self.conftime = conftime
-        self.buildtime = buildtime
-        self.testtime = testtime
+        self.conftime = 0
+        self.buildtime = 0
+        self.testtime = 0
+    def add_step(self, step, stdo, stde, mlog='', time=0):
+        self.step = step
+        self.stdo += stdo
+        self.stde += stde
+        self.mlog += mlog
+        if step == BuildStep.configure:
+            self.conftime = time
+        elif step == BuildStep.build:
+            self.buildtime = time
+        elif step == BuildStep.test:
+            self.testtime = time
+
+    def fail(self, msg):
+        self.msg = msg
 
 
 @functools.total_ordering
 class TestDef:
@@ -230,14 +243,10 @@ def validate_install(srcdir: str, installdir: Path, compiler, env) -> str:
     # List of installed files
     info_file = Path(srcdir) / 'installed_files.txt'
     installdir = Path(installdir)
-    # If this exists, the test does not install any other files
-    noinst_file = Path('usr/no-installed-files')
     expected = {}  # type: T.Dict[Path, bool]
     ret_msg = ''
     # Generate list of expected files
-    if (installdir / noinst_file).is_file():
-        expected[noinst_file] = False
-    elif info_file.is_file():
+    if info_file.is_file():
         with info_file.open() as f:
             for line in f:
                 line = platform_fix_name(line.strip(), compiler, env)
@@ -434,16 +443,20 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     except Exception:
         mesonlog = no_meson_log_msg
     cicmds = run_ci_commands(mesonlog)
-    gen_time = time.time() - gen_start
+    testresult = TestResult(cicmds)
+    testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)
     if should_fail == 'meson':
         if returncode == 1:
-            return TestResult('', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
         elif returncode != 0:
-            return TestResult('Test exited with unexpected status {}'.format(returncode), BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            testresult.fail('Test exited with unexpected status {}.'.format(returncode))
+            return testresult
         else:
-            return TestResult('Test that should have failed succeeded', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            testresult.fail('Test that should have failed succeeded.')
+            return testresult
     if returncode != 0:
-        return TestResult('Generating the build system failed.', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+        testresult.fail('Generating the build system failed.')
+        return testresult
     builddata = build.load(test_build_dir)
     # Touch the meson.build file to force a regenerate so we can test that
     # regeneration works before a build is run.
@@ -453,15 +466,15 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     dir_args = get_backend_args_for_dir(backend, test_build_dir)
     build_start = time.time()
     pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)
-    build_time = time.time() - build_start
-    stdo += o
-    stde += e
+    testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)
     if should_fail == 'build':
         if pc.returncode != 0:
-            return TestResult('', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)
-        return TestResult('Test that should have failed to build succeeded', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
+        testresult.fail('Test that should have failed to build succeeded.')
+        return testresult
     if pc.returncode != 0:
-        return TestResult('Compiling source code failed.', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time, build_time)
+        testresult.fail('Compiling source code failed.')
+        return testresult
     # Touch the meson.build file to force a regenerate so we can test that
     # regeneration works after a build is complete.
     ensure_backend_detects_changes(backend)
@@ -469,37 +482,44 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     test_start = time.time()
     # Test in-process
     (returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)
-    test_time = time.time() - test_start
-    stdo += tstdo
-    stde += tstde
-    mesonlog += test_log
+    testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)
     if should_fail == 'test':
         if returncode != 0:
-            return TestResult('', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)
-        return TestResult('Test that should have failed to run unit tests succeeded', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
+        testresult.fail('Test that should have failed to run unit tests succeeded.')
+        return testresult
     if returncode != 0:
-        return TestResult('Running unit tests failed.', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        testresult.fail('Running unit tests failed.')
+        return testresult
     # Do installation, if the backend supports it
     if install_commands:
         env = os.environ.copy()
         env['DESTDIR'] = install_dir
         # Install with subprocess
         pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)
-        stdo += o
-        stde += e
+        testresult.add_step(BuildStep.install, o, e)
         if pi.returncode != 0:
-            return TestResult('Running install failed.', BuildStep.install, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+            testresult.fail('Running install failed.')
+            return testresult
+
     # Clean with subprocess
     env = os.environ.copy()
     pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)
-    stdo += o
-    stde += e
+    testresult.add_step(BuildStep.clean, o, e)
     if pi.returncode != 0:
-        return TestResult('Running clean failed.', BuildStep.clean, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        testresult.fail('Running clean failed.')
+        return testresult
+
+    # Validate installed files
+    testresult.add_step(BuildStep.install, '', '')
     if not install_commands:
-        return TestResult('', BuildStep.install, '', '', mesonlog, cicmds, gen_time, build_time, test_time)
-    return TestResult(validate_install(testdir, install_dir, compiler, builddata.environment),
-                      BuildStep.validate, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        return testresult
+    install_msg = validate_install(testdir, install_dir, compiler, builddata.environment)
+    if install_msg:
+        testresult.fail(install_msg)
+        return testresult
+
+    return testresult
 
 def gather_tests(testdir: Path) -> T.Iterator[TestDef]:
     tests = [t.name for t in testdir.glob('*') if t.is_dir()]
diff --git a/test cases/common/56 install script/no-installed-files b/test cases/common/56 install script/no-installed-files
deleted file mode 100644
index e69de29..0000000
--- a/test cases/common/56 install script/no-installed-files
+++ /dev/null
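Taken together, the `TestResult` changes replace the old pattern of constructing a fully populated result at every return site with a single object that accumulates output step by step and is marked failed at most once. The class below is transcribed from the diff; the `BuildStep` member values and the usage at the bottom are illustrative stand-ins for the real call sites in `_run_test()`, not part of this commit.

```python
import time
from enum import Enum

class BuildStep(Enum):
    # Member names appear in the diff; the numeric values are assumed.
    configure = 1
    build = 2
    test = 3
    install = 4
    clean = 5
    validate = 6

class TestResult:
    def __init__(self, cicmds):
        self.msg = ''  # empty msg indicates test success
        self.stdo = ''
        self.stde = ''
        self.mlog = ''
        self.cicmds = cicmds
        self.conftime = 0
        self.buildtime = 0
        self.testtime = 0

    def add_step(self, step, stdo, stde, mlog='', time=0):
        # Record the most recent step and accumulate its output.
        self.step = step
        self.stdo += stdo
        self.stde += stde
        self.mlog += mlog
        if step == BuildStep.configure:
            self.conftime = time
        elif step == BuildStep.build:
            self.buildtime = time
        elif step == BuildStep.test:
            self.testtime = time

    def fail(self, msg):
        self.msg = msg

# Hypothetical usage mirroring the flow of _run_test():
result = TestResult(cicmds=[])
start = time.time()
result.add_step(BuildStep.configure, 'configure stdout\n', '', 'meson log\n',
                time.time() - start)
result.add_step(BuildStep.build, 'build stdout\n', '')
build_failed = False  # stands in for pc.returncode != 0 in the real script
if build_failed:
    result.fail('Compiling source code failed.')
print('PASS' if not result.msg else 'FAIL: ' + result.msg)
```

One consequence of this design is that output from every completed step is preserved in the final result regardless of where the test fails, which keeps the CI log capture complete without threading a growing argument list through every return.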