diff options
author | Jussi Pakkanen <jpakkane@gmail.com> | 2021-02-07 12:03:10 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2021-02-07 12:03:10 +0000 |
commit | 034b81a09f20902c2d8cb907e162c7e6cb48e8c1 (patch) | |
tree | 80a1b89ab632663ec056d4007d06ba65482f0ec1 | |
parent | 8b82ffa9e423558d7644c7135db4114f59537829 (diff) | |
parent | c7c2bc8db111a5be277aeb14aecfe0d28ab286a9 (diff) | |
download | meson-034b81a09f20902c2d8cb907e162c7e6cb48e8c1.zip meson-034b81a09f20902c2d8cb907e162c7e6cb48e8c1.tar.gz meson-034b81a09f20902c2d8cb907e162c7e6cb48e8c1.tar.bz2 |
Merge pull request #8288 from bonzini/test-setup-exclude-suites
introduce add_test_setup(exclude_suites: ...) keyword argument
-rw-r--r-- | docs/markdown/Reference-manual.md | 3 | ||||
-rw-r--r-- | mesonbuild/build.py | 7 | ||||
-rw-r--r-- | mesonbuild/interpreter.py | 7 | ||||
-rw-r--r-- | mesonbuild/mtest.py | 132 | ||||
-rwxr-xr-x | run_unittests.py | 10 | ||||
-rw-r--r-- | test cases/unit/2 testsetups/meson.build | 3 |
6 files changed, 94 insertions, 68 deletions
diff --git a/docs/markdown/Reference-manual.md b/docs/markdown/Reference-manual.md index f623728..f12f695 100644 --- a/docs/markdown/Reference-manual.md +++ b/docs/markdown/Reference-manual.md @@ -125,6 +125,9 @@ the following: - `is_default` *(since 0.49.0)*: a bool to set whether this is the default test setup. If `true`, the setup will be used whenever `meson test` is run without the `--setup` option. +- `exclude_suites` *(since 0.57.0)*: a list of test suites that should be + excluded when using this setup. Suites specified in the `--suite` option + to `meson test` will always run, overriding `add_test_setup` if necessary. To use the test setup, run `meson test --setup=*name*` inside the build dir. diff --git a/mesonbuild/build.py b/mesonbuild/build.py index 4abc800..160ee9a 100644 --- a/mesonbuild/build.py +++ b/mesonbuild/build.py @@ -39,7 +39,6 @@ from .linkers import StaticLinker from .interpreterbase import FeatureNew if T.TYPE_CHECKING: - from .coredata import KeyedOptionDictType, OptionDictType from .interpreter import Test from .mesonlib import FileMode, FileOrString @@ -2325,7 +2324,7 @@ class CustomTarget(Target, CommandBase): for ed in unholder(extra_deps): if not isinstance(ed, (CustomTarget, BuildTarget)): raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: {}({})' - .format(type(ed), ed)) + .format(type(ed), ed)) self.extra_depends.append(ed) for i in depend_files: if isinstance(i, (File, str)): @@ -2620,11 +2619,13 @@ class Data: class TestSetup: def __init__(self, exe_wrapper: T.Optional[T.List[str]], gdb: bool, - timeout_multiplier: int, env: EnvironmentVariables): + timeout_multiplier: int, env: EnvironmentVariables, + exclude_suites: T.List[str]): self.exe_wrapper = exe_wrapper self.gdb = gdb self.timeout_multiplier = timeout_multiplier self.env = env + self.exclude_suites = exclude_suites def get_sources_string_names(sources, backend): ''' diff --git 
a/mesonbuild/interpreter.py b/mesonbuild/interpreter.py index f3d7502..176c1da 100644 --- a/mesonbuild/interpreter.py +++ b/mesonbuild/interpreter.py @@ -2301,7 +2301,8 @@ permitted_kwargs = {'add_global_arguments': {'language', 'native'}, 'add_languages': {'required', 'native'}, 'add_project_link_arguments': {'language', 'native'}, 'add_project_arguments': {'language', 'native'}, - 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'}, + 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default', + 'exclude_suites'}, 'benchmark': _base_test_args, 'build_target': known_build_target_kwargs, 'configure_file': {'input', @@ -4686,8 +4687,10 @@ different subdirectory. raise InterpreterException('\'%s\' is already set as default. ' 'is_default can be set to true only once' % self.build.test_setup_default_name) self.build.test_setup_default_name = setup_name + exclude_suites = mesonlib.stringlistify(kwargs.get('exclude_suites', [])) env = self.unpack_env_kwarg(kwargs) - self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env) + self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env, + exclude_suites) @permittedKwargs(permitted_kwargs['add_global_arguments']) @stringArgs diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py index 24db5ce..79bb075 100644 --- a/mesonbuild/mtest.py +++ b/mesonbuild/mtest.py @@ -1114,22 +1114,6 @@ def check_testdata(objs: T.List[TestSerialisation]) -> T.List[TestSerialisation] raise MesonVersionMismatchException(obj.version, coredata_version) return objs -def load_benchmarks(build_dir: str) -> T.List[TestSerialisation]: - datafile = Path(build_dir) / 'meson-private' / 'meson_benchmark_setup.dat' - if not datafile.is_file(): - raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir)) - with datafile.open('rb') as f: - objs = check_testdata(pickle.load(f)) - return objs - 
-def load_tests(build_dir: str) -> T.List[TestSerialisation]: - datafile = Path(build_dir) / 'meson-private' / 'meson_test_setup.dat' - if not datafile.is_file(): - raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir)) - with datafile.open('rb') as f: - objs = check_testdata(pickle.load(f)) - return objs - # Custom waiting primitives for asyncio async def try_wait_one(*awaitables: T.Any, timeout: T.Optional[T.Union[int, float]]) -> None: @@ -1428,16 +1412,47 @@ class TestHarness: self.loggers.append(ConsoleLogger()) self.need_console = False - if self.options.benchmark: - self.tests = load_benchmarks(options.wd) - else: - self.tests = load_tests(options.wd) + self.logfile_base = None # type: T.Optional[str] + if self.options.logbase and not self.options.gdb: + namebase = None + self.logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase) + + if self.options.wrapper: + namebase = os.path.basename(self.get_wrapper(self.options)[0]) + elif self.options.setup: + namebase = self.options.setup.replace(":", "_") + + if namebase: + self.logfile_base += '-' + namebase.replace(' ', '_') + + startdir = os.getcwd() + try: + if self.options.wd: + os.chdir(self.options.wd) + self.build_data = build.load(os.getcwd()) + if not self.options.setup: + self.options.setup = self.build_data.test_setup_default_name + if self.options.benchmark: + self.tests = self.load_tests('meson_benchmark_setup.dat') + else: + self.tests = self.load_tests('meson_test_setup.dat') + finally: + os.chdir(startdir) + ss = set() for t in self.tests: for s in t.suite: ss.add(s) self.suites = list(ss) + def load_tests(self, file_name: str) -> T.List[TestSerialisation]: + datafile = Path('meson-private') / file_name + if not datafile.is_file(): + raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(self.options.wd)) + with datafile.open('rb') as f: + objs = check_testdata(pickle.load(f)) + return objs + 
def __enter__(self) -> 'TestHarness': return self @@ -1448,16 +1463,19 @@ class TestHarness: for l in self.loggers: l.close() - def merge_suite_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]: - if ':' in options.setup: - if options.setup not in self.build_data.test_setups: - sys.exit("Unknown test setup '{}'.".format(options.setup)) - current = self.build_data.test_setups[options.setup] + def get_test_setup(self, test: T.Optional[TestSerialisation]) -> build.TestSetup: + if ':' in self.options.setup: + if self.options.setup not in self.build_data.test_setups: + sys.exit("Unknown test setup '{}'.".format(self.options.setup)) + return self.build_data.test_setups[self.options.setup] else: - full_name = test.project_name + ":" + options.setup + full_name = test.project_name + ":" + self.options.setup if full_name not in self.build_data.test_setups: - sys.exit("Test setup '{}' not found from project '{}'.".format(options.setup, test.project_name)) - current = self.build_data.test_setups[full_name] + sys.exit("Test setup '{}' not found from project '{}'.".format(self.options.setup, test.project_name)) + return self.build_data.test_setups[full_name] + + def merge_setup_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]: + current = self.get_test_setup(test) if not options.gdb: options.gdb = current.gdb if options.gdb: @@ -1475,10 +1493,8 @@ class TestHarness: def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner: name = self.get_pretty_suite(test) options = deepcopy(self.options) - if not options.setup: - options.setup = self.build_data.test_setup_default_name - if options.setup: - env = self.merge_suite_options(options, test) + if self.options.setup: + env = self.merge_setup_options(options, test) else: env = os.environ.copy() test_env = test.env.get_env(env) @@ -1564,14 +1580,14 @@ class TestHarness: def total_failure_count(self) -> int: return self.fail_count + 
self.unexpectedpass_count + self.timeout_count - def doit(self, options: argparse.Namespace) -> int: + def doit(self) -> int: if self.is_run: raise RuntimeError('Test harness object can only be used once.') self.is_run = True tests = self.get_tests() if not tests: return 0 - if not options.no_rebuild and not rebuild_deps(options.wd, tests): + if not self.options.no_rebuild and not rebuild_deps(self.options.wd, tests): # We return 125 here in case the build failed. # The reason is that exit code 125 tells `git bisect run` that the current # commit should be skipped. Thus users can directly use `meson test` to @@ -1585,7 +1601,6 @@ class TestHarness: try: if self.options.wd: os.chdir(self.options.wd) - self.build_data = build.load(os.getcwd()) runners = [self.get_test_runner(test) for test in tests] self.duration_max_len = max([len(str(int(runner.timeout or 99))) for runner in runners]) @@ -1639,9 +1654,20 @@ class TestHarness: return False def test_suitable(self, test: TestSerialisation) -> bool: - return ((not self.options.include_suites or - TestHarness.test_in_suites(test, self.options.include_suites)) and not - TestHarness.test_in_suites(test, self.options.exclude_suites)) + if TestHarness.test_in_suites(test, self.options.exclude_suites): + return False + + if self.options.include_suites: + # Both force inclusion (overriding add_test_setup) and exclude + # everything else + return TestHarness.test_in_suites(test, self.options.include_suites) + + if self.options.setup: + setup = self.get_test_setup(test) + if TestHarness.test_in_suites(test, setup.exclude_suites): + return False + + return True def tests_from_args(self, tests: T.List[TestSerialisation]) -> T.Generator[TestSerialisation, None, None]: ''' @@ -1670,14 +1696,7 @@ class TestHarness: print('No tests defined.') return [] - if self.options.include_suites or self.options.exclude_suites: - tests = [] - for tst in self.tests: - if self.test_suitable(tst): - tests.append(tst) - else: - tests = self.tests - 
+ tests = [t for t in self.tests if self.test_suitable(t)] if self.options.args: tests = list(self.tests_from_args(tests)) @@ -1692,23 +1711,12 @@ class TestHarness: l.flush() def open_logfiles(self) -> None: - if not self.options.logbase or self.options.gdb: + if not self.logfile_base: return - namebase = None - logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase) - - if self.options.wrapper: - namebase = os.path.basename(self.get_wrapper(self.options)[0]) - elif self.options.setup: - namebase = self.options.setup.replace(":", "_") - - if namebase: - logfile_base += '-' + namebase.replace(' ', '_') - - self.loggers.append(JunitBuilder(logfile_base + '.junit.xml')) - self.loggers.append(JsonLogfileBuilder(logfile_base + '.json')) - self.loggers.append(TextLogfileBuilder(logfile_base + '.txt', errors='surrogateescape')) + self.loggers.append(JunitBuilder(self.logfile_base + '.junit.xml')) + self.loggers.append(JsonLogfileBuilder(self.logfile_base + '.json')) + self.loggers.append(TextLogfileBuilder(self.logfile_base + '.txt', errors='surrogateescape')) @staticmethod def get_wrapper(options: argparse.Namespace) -> T.List[str]: @@ -1913,7 +1921,7 @@ def run(options: argparse.Namespace) -> int: try: if options.list: return list_tests(th) - return th.doit(options) + return th.doit() except TestException as e: print('Meson test encountered an error:\n') if os.environ.get('MESON_FORCE_BACKTRACE'): diff --git a/run_unittests.py b/run_unittests.py index 7981df6..2e7fe91 100755 --- a/run_unittests.py +++ b/run_unittests.py @@ -2550,6 +2550,16 @@ class AllPlatformTests(BasePlatformTests): self._run(self.mtest_command + ['--setup=onlyenv3']) # Setup with only a timeout works self._run(self.mtest_command + ['--setup=timeout']) + # Setup that skips test works + self._run(self.mtest_command + ['--setup=good']) + with open(os.path.join(self.logdir, 'testlog-good.txt')) as f: + exclude_suites_log = f.read() + self.assertFalse('buggy' in 
exclude_suites_log) + # --suite overrides add_test_setup(xclude_suites) + self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy']) + with open(os.path.join(self.logdir, 'testlog-good.txt')) as f: + include_suites_log = f.read() + self.assertTrue('buggy' in include_suites_log) def test_testsetup_selection(self): testdir = os.path.join(self.unit_test_dir, '14 testsetup selection') diff --git a/test cases/unit/2 testsetups/meson.build b/test cases/unit/2 testsetups/meson.build index 8343856..1e8f018 100644 --- a/test cases/unit/2 testsetups/meson.build +++ b/test cases/unit/2 testsetups/meson.build @@ -12,7 +12,7 @@ add_test_setup('valgrind', env : env) buggy = executable('buggy', 'buggy.c', 'impl.c') -test('Test buggy', buggy) +test('Test buggy', buggy, suite: ['buggy']) envcheck = find_program('envcheck.py') test('test-env', envcheck) @@ -23,3 +23,4 @@ add_test_setup('onlyenv2', env : 'TEST_ENV=1') add_test_setup('onlyenv3', env : ['TEST_ENV=1']) add_test_setup('wrapper', exe_wrapper : [vg, '--error-exitcode=1']) add_test_setup('timeout', timeout_multiplier : 20) +add_test_setup('good', exclude_suites : 'buggy') |