Diffstat (limited to 'mesontest.py')
-rwxr-xr-x   mesontest.py   128
1 file changed, 98 insertions, 30 deletions
diff --git a/mesontest.py b/mesontest.py
index 73c92e4..2d834b1 100755
--- a/mesontest.py
+++ b/mesontest.py
@@ -54,6 +54,8 @@ def determine_worker_count():
 parser = argparse.ArgumentParser()
 parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                     help='Number of times to run the tests.')
+parser.add_argument('--no-rebuild', default=False, action='store_true',
+                    help='Do not rebuild before running tests.')
 parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                     help='Run test under gdb.')
 parser.add_argument('--list', default=False, dest='list', action='store_true',
@@ -76,6 +78,10 @@ parser.add_argument('--num-processes', default=determine_worker_count(), type=in
                     help='How many parallel processes to use.')
 parser.add_argument('-v', '--verbose', default=False, action='store_true',
                     help='Do not redirect stdout and stderr')
+parser.add_argument('-t', '--timeout-multiplier', type=float, default=1.0,
+                    help='Define a multiplier for test timeouts, for example '
+                         'when running tests in particular conditions where '
+                         'they might take more time to execute.')
 parser.add_argument('args', nargs='*')
 
 class TestRun():
@@ -142,13 +148,35 @@ class TestHarness:
         self.collected_logs = []
         self.error_count = 0
         self.is_run = False
+        self.cant_rebuild = False
+        self.failed_tests = []
         if self.options.benchmark:
             self.datafile = os.path.join(options.wd, 'meson-private/meson_benchmark_setup.dat')
         else:
             self.datafile = os.path.join(options.wd, 'meson-private/meson_test_setup.dat')
-        print(self.datafile)
+
+    def rebuild_all(self):
+        if not os.path.isfile(os.path.join(self.options.wd, 'build.ninja')):
+            print('Only the ninja backend is supported to rebuild tests before running them.')
+            self.cant_rebuild = True
+            return True
+
+        ninja = environment.detect_ninja()
+        if not ninja:
+            print("Can't find ninja, can't rebuild tests.")
+            self.cant_rebuild = True
+            return False
+
+        p = subprocess.Popen([ninja, '-C', self.options.wd])
+        p.communicate()
+
+        if p.returncode != 0:
+            print('Could not rebuild')
+            return False
+
+        return True
 
     def run_single_test(self, wrap, test):
+        failing = False
         if test.fname[0].endswith('.jar'):
             cmd = ['java', '-jar'] + test.fname
         elif not test.is_cross and run_with_mono(test.fname[0]):
@@ -198,9 +226,12 @@ class TestHarness:
                              cwd=test.workdir,
                              preexec_fn=setsid)
         timed_out = False
+        timeout = test.timeout * self.options.timeout_multiplier
         try:
-            (stdo, stde) = p.communicate(timeout=test.timeout)
+            (stdo, stde) = p.communicate(timeout=timeout)
         except subprocess.TimeoutExpired:
+            if self.options.verbose:
+                print('%s timed out (after %d seconds)' % (test.name, timeout))
             timed_out = True
             # Python does not provide multiplatform support for
             # killing a process and all its children so we need
@@ -217,6 +248,7 @@ class TestHarness:
         stde = decode(stde)
         if timed_out:
             res = 'TIMEOUT'
+            failing = True
         if p.returncode == GNU_SKIP_RETURNCODE:
             res = 'SKIP'
         elif (not test.should_fail and p.returncode == 0) or \
@@ -224,8 +256,14 @@ class TestHarness:
             res = 'OK'
         else:
             res = 'FAIL'
+            failing = True
         returncode = p.returncode
-        return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+        result = TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+
+        if failing:
+            self.failed_tests.append(result)
+
+        return result
 
     def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
         startpad = ' '*(numlen - len('%d' % (i+1)))
@@ -251,7 +289,7 @@ class TestHarness:
             print('Could not find test data file. Probably this means that you did not run this in the build directory.')
             return 1
         self.is_run = True
-        logfilename = self.run_tests(self.datafile, self.options.logbase)
+        logfilename = self.run_tests(self.options.logbase)
         if len(self.collected_logs) > 0:
             if len(self.collected_logs) > 10:
                 print('\nThe output from the first 10 failed tests:\n')
@@ -268,7 +306,15 @@ class TestHarness:
         print('Full log written to %s.' % logfilename)
         return self.error_count
 
-    def run_tests(self, datafilename, log_base):
+    def get_tests(self):
+        with open(self.datafile, 'rb') as f:
+            tests = pickle.load(f)
+        for test in tests:
+            test.rebuilt = False
+
+        return tests
+
+    def run_tests(self, log_base):
         logfile_base = os.path.join(self.options.wd, 'meson-logs', log_base)
         if self.options.wrapper is None:
             wrap = []
@@ -279,8 +325,7 @@ class TestHarness:
             namebase = wrap[0]
         logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
         jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
-        with open(datafilename, 'rb') as f:
-            tests = pickle.load(f)
+        tests = self.get_tests()
         if len(tests) == 0:
             print('No tests defined.')
             return
@@ -298,27 +343,28 @@ class TestHarness:
             logfile.write('Log of Meson test suite run on %s.\n\n' %
                           datetime.datetime.now().isoformat())
 
-            for i, test in enumerate(filtered_tests):
-                if test.suite[0] == '':
-                    visible_name = test.name
-                else:
-                    if self.options.suite is not None:
-                        visible_name = self.options.suite + ' / ' + test.name
+            for _ in range(self.options.repeat):
+                for i, test in enumerate(filtered_tests):
+                    if test.suite[0] == '':
+                        visible_name = test.name
                     else:
-                        visible_name = test.suite[0] + ' / ' + test.name
-
-                if not test.is_parallel:
-                    self.drain_futures(futures)
-                    futures = []
-                    res = self.run_single_test(wrap, test)
-                    if not self.options.verbose:
-                        self.print_stats(numlen, filtered_tests, visible_name, res, i,
-                                         logfile, jsonlogfile)
-                else:
-                    f = executor.submit(self.run_single_test, wrap, test)
-                    if not self.options.verbose:
-                        futures.append((f, numlen, filtered_tests, visible_name, i,
-                                        logfile, jsonlogfile))
+                        if self.options.suite is not None:
+                            visible_name = self.options.suite + ' / ' + test.name
+                        else:
+                            visible_name = test.suite[0] + ' / ' + test.name
+
+                    if not test.is_parallel:
+                        self.drain_futures(futures)
+                        futures = []
+                        res = self.run_single_test(wrap, test)
+                        if not self.options.verbose:
+                            self.print_stats(numlen, filtered_tests, visible_name, res, i,
+                                             logfile, jsonlogfile)
+                    else:
+                        f = executor.submit(self.run_single_test, wrap, test)
+                        if not self.options.verbose:
+                            futures.append((f, numlen, filtered_tests, visible_name, i,
+                                            logfile, jsonlogfile))
             self.drain_futures(futures, logfile, jsonlogfile)
         finally:
             if jsonlogfile:
@@ -332,8 +378,23 @@ class TestHarness:
     def drain_futures(self, futures, logfile, jsonlogfile):
         for i in futures:
             (result, numlen, tests, name, i, logfile, jsonlogfile) = i
-            if not self.options.verbose:
+            if self.options.repeat > 1 and self.failed_tests:
+                result.cancel()
+            elif not self.options.verbose:
                 self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
+            else:
+                result.result()
+
+        if self.options.repeat > 1 and self.failed_tests:
+            if not self.options.verbose:
+                for res in self.failed_tests:
+                    print('Test failed:\n\n-- stdout --\n')
+                    print(res.stdo)
+                    print('\n-- stderr --\n')
+                    print(res.stde)
+            return 1
+
+        return
 
     def run_special(self):
         'Tests run by the user, usually something like "under gdb 1000 times".'
@@ -348,7 +409,7 @@ class TestHarness:
             return 1
         if os.path.isfile('build.ninja'):
             subprocess.check_call([environment.detect_ninja(), 'all'])
-        tests = pickle.load(open(self.datafile, 'rb'))
+        tests = self.get_tests()
         if self.options.list:
             for i in tests:
                 print(i.name)
@@ -358,9 +419,11 @@ class TestHarness:
         for i in range(self.options.repeat):
             print('Running: %s %d/%d' % (t.name, i+1, self.options.repeat))
             if self.options.gdb:
-                wrap = ['gdb', '--quiet', '-ex', 'run', '-ex', 'quit']
+                wrap = ['gdb', '--quiet']
+                if self.options.repeat > 1:
+                    wrap.extend(['-ex', 'run', '-ex', 'quit'])
                 if len(t.cmd_args) > 0:
                     wrap.append('--args')
                 res = self.run_single_test(wrap, t)
             else:
@@ -389,9 +452,14 @@ def run(args):
     if options.gdb:
         options.verbose = True
 
+    options.wd = os.path.abspath(options.wd)
+
     th = TestHarness(options)
     if options.list:
         return th.run_special()
-    elif len(options.args) == 0:
+
+    if not options.no_rebuild:
+        if not th.rebuild_all():
+            return -1
+
+    if len(options.args) == 0:
         return th.doit()
     return th.run_special()
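
For reference, the options this patch introduces would be exercised roughly as
follows (hypothetical invocations from a Ninja build directory; the option
names are taken from the argparse definitions above):

    # Skip the automatic ninja rebuild before running the suite
    ./mesontest.py --no-rebuild

    # Triple every test's timeout, e.g. on a slow or heavily loaded machine
    ./mesontest.py --timeout-multiplier 3

    # Run the suite five times; with --repeat > 1 the harness cancels pending
    # tests after the first failure and prints the failures' stdout/stderr
    ./mesontest.py --repeat 5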