author     Daniel Mensinger <daniel@mensinger-ka.de>   2020-09-01 14:28:08 +0200
committer  Daniel Mensinger <daniel@mensinger-ka.de>   2020-09-08 20:15:58 +0200
commit     23818fc5a389c49e2673f79af2c90d9d56b1aaf0 (patch)
tree       75d73e4d81c9b1ca3187668808a80a8673ead0e6 /mesonbuild/mtest.py
parent     1b19822a2df7ff969e3734973f131df9ed9d2ee3 (diff)
typing: more fixes
Diffstat (limited to 'mesonbuild/mtest.py')
-rw-r--r--  mesonbuild/mtest.py  67
1 file changed, 35 insertions, 32 deletions
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index cc82bd0..86d7384 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -43,10 +43,7 @@ from . import environment
 from . import mlog
 from .dependencies import ExternalProgram
 from .mesonlib import MesonException, get_wine_shortpath, split_args, join_args
-from .backend.backends import TestProtocol
-
-if T.TYPE_CHECKING:
-    from .backend.backends import TestSerialisation
+from .backend.backends import TestProtocol, TestSerialisation
 
 # GNU autotools interprets a return code of 77 from tests it executes to
 # mean that the test should be skipped.
@@ -445,7 +442,7 @@ class JunitBuilder:
 class TestRun:
 
     @classmethod
-    def make_gtest(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
+    def make_gtest(cls, test: TestSerialisation, test_env: T.Dict[str, str],
                    returncode: int, starttime: float, duration: float,
                    stdo: T.Optional[str], stde: T.Optional[str],
                    cmd: T.Optional[T.List[str]]) -> 'TestRun':
@@ -459,7 +456,7 @@ class TestRun:
                    junit=tree)
 
     @classmethod
-    def make_exitcode(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
+    def make_exitcode(cls, test: TestSerialisation, test_env: T.Dict[str, str],
                       returncode: int, starttime: float, duration: float,
                       stdo: T.Optional[str], stde: T.Optional[str],
                       cmd: T.Optional[T.List[str]], **kwargs: T.Any) -> 'TestRun':
@@ -474,7 +471,7 @@ class TestRun:
         return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd, **kwargs)
 
     @classmethod
-    def make_tap(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
+    def make_tap(cls, test: TestSerialisation, test_env: T.Dict[str, str],
                  returncode: int, starttime: float, duration: float,
                  stdo: str, stde: str,
                  cmd: T.Optional[T.List[str]]) -> 'TestRun':
@@ -511,7 +508,7 @@ class TestRun:
 
         return cls(test, test_env, res, results, returncode, starttime, duration, stdo, stde, cmd)
 
-    def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
+    def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
                  res: TestResult, results: T.List[TestResult], returncode: int,
                  starttime: float, duration: float,
                  stdo: T.Optional[str], stde: T.Optional[str],
@@ -577,26 +574,32 @@ def write_json_log(jsonlogfile: T.TextIO, test_name: str, result: TestRun) -> No
 def run_with_mono(fname: str) -> bool:
     return fname.endswith('.exe') and not (is_windows() or is_cygwin())
 
-def load_benchmarks(build_dir: str) -> T.List['TestSerialisation']:
+def load_benchmarks(build_dir: str) -> T.List[TestSerialisation]:
     datafile = Path(build_dir) / 'meson-private' / 'meson_benchmark_setup.dat'
     if not datafile.is_file():
         raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
     with datafile.open('rb') as f:
-        obj = T.cast(T.List['TestSerialisation'], pickle.load(f))
+        obj = pickle.load(f)
+        assert isinstance(obj, list)
+        for i in obj:
+            assert isinstance(i, TestSerialisation)
     return obj
 
-def load_tests(build_dir: str) -> T.List['TestSerialisation']:
+def load_tests(build_dir: str) -> T.List[TestSerialisation]:
     datafile = Path(build_dir) / 'meson-private' / 'meson_test_setup.dat'
     if not datafile.is_file():
         raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
     with datafile.open('rb') as f:
-        obj = T.cast(T.List['TestSerialisation'], pickle.load(f))
+        obj = pickle.load(f)
+        assert isinstance(obj, list)
+        for i in obj:
+            assert isinstance(i, TestSerialisation)
     return obj
 
 
 class SingleTestRunner:
 
-    def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
+    def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
                  env: T.Dict[str, str], options: argparse.Namespace):
         self.test = test
         self.test_env = test_env
@@ -605,9 +608,9 @@ class SingleTestRunner:
 
     def _get_cmd(self) -> T.Optional[T.List[str]]:
         if self.test.fname[0].endswith('.jar'):
-            return ['java', '-jar'] + T.cast(T.List[str], self.test.fname)
+            return ['java', '-jar'] + self.test.fname
         elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
-            return ['mono'] + T.cast(T.List[str], self.test.fname)
+            return ['mono'] + self.test.fname
         elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
             if self.test.exe_runner is None:
                 # Can not run test on cross compiled executable
@@ -620,8 +623,8 @@ class SingleTestRunner:
                 msg = ('The exe_wrapper defined in the cross file {!r} was not '
                        'found. Please check the command and/or add it to PATH.')
                 raise TestException(msg.format(self.test.exe_runner.name))
-            return T.cast(T.List[str], self.test.exe_runner.get_command()) + T.cast(T.List[str], self.test.fname)
-        return T.cast(T.List[str], self.test.fname)
+            return self.test.exe_runner.get_command() + self.test.fname
+        return self.test.fname
 
     def run(self) -> TestRun:
         cmd = self._get_cmd()
@@ -833,7 +836,7 @@ class TestHarness:
                 lfile.close()
                 setattr(self, f, None)
 
-    def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> T.Dict[str, str]:
+    def merge_suite_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]:
         if ':' in options.setup:
             if options.setup not in self.build_data.test_setups:
                 sys.exit("Unknown test setup '{}'.".format(options.setup))
@@ -855,9 +858,9 @@ class TestHarness:
             sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
         if options.wrapper is None:
             options.wrapper = current.exe_wrapper
-        return T.cast(T.Dict[str, str], current.env.get_env(os.environ.copy()))
+        return current.env.get_env(os.environ.copy())
 
-    def get_test_runner(self, test: 'TestSerialisation') -> SingleTestRunner:
+    def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner:
         options = deepcopy(self.options)
         if not options.setup:
             options.setup = self.build_data.test_setup_default_name
@@ -889,7 +892,7 @@ class TestHarness:
             sys.exit('Unknown test result encountered: {}'.format(result.res))
 
     def print_stats(self, test_count: int, name_max_len: int,
-                    tests: T.List['TestSerialisation'],
+                    tests: T.List[TestSerialisation],
                     name: str, result: TestRun, i: int) -> None:
         ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
         bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT,
@@ -983,14 +986,14 @@ class TestHarness:
     @staticmethod
     def split_suite_string(suite: str) -> T.Tuple[str, str]:
         if ':' in suite:
-            # mypy can't figure out that str.split(n, 1) will return a list of
-            # length 2, so we have to help it.
-            return T.cast(T.Tuple[str, str], tuple(suite.split(':', 1)))
+            split = suite.split(':', 1)
+            assert len(split) == 2
+            return split[0], split[1]
         else:
             return suite, ""
 
     @staticmethod
-    def test_in_suites(test: 'TestSerialisation', suites: T.List[str]) -> bool:
+    def test_in_suites(test: TestSerialisation, suites: T.List[str]) -> bool:
         for suite in suites:
             (prj_match, st_match) = TestHarness.split_suite_string(suite)
             for prjst in test.suite:
@@ -1021,12 +1024,12 @@ class TestHarness:
                 return True
         return False
 
-    def test_suitable(self, test: 'TestSerialisation') -> bool:
+    def test_suitable(self, test: TestSerialisation) -> bool:
         return ((not self.options.include_suites or
                  TestHarness.test_in_suites(test, self.options.include_suites)) and not
                 TestHarness.test_in_suites(test, self.options.exclude_suites))
 
-    def get_tests(self) -> T.List['TestSerialisation']:
+    def get_tests(self) -> T.List[TestSerialisation]:
         if not self.tests:
             print('No tests defined.')
             return []
@@ -1089,17 +1092,17 @@ class TestHarness:
             wrap += options.wrapper
         return wrap
 
-    def get_pretty_suite(self, test: 'TestSerialisation') -> str:
+    def get_pretty_suite(self, test: TestSerialisation) -> str:
         if len(self.suites) > 1 and test.suite:
             rv = TestHarness.split_suite_string(test.suite[0])[0]
             s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
             if s:
                 rv += ":"
-            return rv + s + " / " + T.cast(str, test.name)
+            return rv + s + " / " + test.name
         else:
-            return T.cast(str, test.name)
+            return test.name
 
-    def run_tests(self, tests: T.List['TestSerialisation']) -> None:
+    def run_tests(self, tests: T.List[TestSerialisation]) -> None:
         executor = None
         futures = []  # type: T.List[T.Tuple[conc.Future[TestRun], int, int, T.List[TestSerialisation], str, int]]
         test_count = len(tests)
@@ -1141,7 +1144,7 @@ class TestHarness:
         finally:
             os.chdir(startdir)
 
-    def drain_futures(self, futures: T.List[T.Tuple['conc.Future[TestRun]', int, int, T.List['TestSerialisation'], str, int]]) -> None:
+    def drain_futures(self, futures: T.List[T.Tuple['conc.Future[TestRun]', int, int, T.List[TestSerialisation], str, int]]) -> None:
         for x in futures:
             (result, test_count, name_max_len, tests, name, i) = x
             if self.options.repeat > 1 and self.fail_count:
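The recurring change in this commit is removing T.cast() in favour of real checks: TestSerialisation is imported unconditionally (so annotations no longer need string quoting), pickled objects are validated with isinstance asserts in load_tests()/load_benchmarks(), and split_suite_string() asserts the length of str.split(':', 1) instead of casting it to a tuple. Below is a minimal standalone sketch of the pickle-loading pattern only, not Meson's code; the names Record and load_records are hypothetical and used purely for illustration.

    # Sketch (assumption: a hypothetical Record type, not Meson's TestSerialisation).
    # Instead of silencing mypy with T.cast, the pickled object is checked at
    # runtime with asserts; this validates the data and lets mypy narrow the type.
    import pickle
    import typing as T
    from dataclasses import dataclass
    from pathlib import Path

    @dataclass
    class Record:
        name: str
        value: int

    def load_records(datafile: Path) -> T.List[Record]:
        with datafile.open('rb') as f:
            obj = pickle.load(f)          # typed as Any by mypy
        assert isinstance(obj, list)      # runtime check, narrows Any -> list
        for item in obj:
            assert isinstance(item, Record)
        return obj

    if __name__ == '__main__':
        path = Path('records.dat')
        path.write_bytes(pickle.dumps([Record('a', 1), Record('b', 2)]))
        print(load_records(path))

Compared with T.cast, which is a no-op at runtime, the asserts catch a corrupted or stale data file immediately at the load site rather than later with a confusing AttributeError.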