Diffstat (limited to 'mesontest.py')
-rwxr-xr-x  mesontest.py | 235
1 file changed, 164 insertions(+), 71 deletions(-)
diff --git a/mesontest.py b/mesontest.py
index 04f72df..2d834b1 100755
--- a/mesontest.py
+++ b/mesontest.py
@@ -18,15 +18,18 @@
import subprocess, sys, os, argparse
import pickle
-import mesonbuild
from mesonbuild import build
from mesonbuild import environment
-import time, datetime, pickle, multiprocessing, json
+import time, datetime, multiprocessing, json
import concurrent.futures as conc
import platform
import signal
+# GNU autotools interprets a return code of 77 from tests it executes to
+# mean that the test should be skipped.
+GNU_SKIP_RETURNCODE = 77
+
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
@@ -51,13 +54,15 @@ def determine_worker_count():
parser = argparse.ArgumentParser()
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
help='Number of times to run the tests.')
+parser.add_argument('--no-rebuild', default=False, action='store_true',
+ help='Do not rebuild before running tests.')
parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
help='Run test under gdb.')
parser.add_argument('--list', default=False, dest='list', action='store_true',
help='List available tests.')
parser.add_argument('--wrapper', default=None, dest='wrapper',
help='wrapper to run tests with (e.g. Valgrind)')
-parser.add_argument('--wd', default=None, dest='wd',
+parser.add_argument('-C', default='.', dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
help='Only run tests belonging to the given suite.')
@@ -71,6 +76,12 @@ parser.add_argument('--logbase', default='testlog',
help="Base name for log file.")
parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
help='How many parallel processes to use.')
+parser.add_argument('-v', '--verbose', default=False, action='store_true',
+ help='Do not redirect stdout and stderr')
+parser.add_argument('-t', '--timeout-multiplier', type=float, default=1.0,
+                    help='Define a multiplier for test timeouts, for instance '
+                         'when tests run under conditions in which they take '
+                         'more time to execute.')
parser.add_argument('args', nargs='*')
class TestRun():
@@ -107,6 +118,8 @@ class TestRun():
return res
def decode(stream):
+ if stream is None:
+ return ''
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
@@ -135,12 +148,35 @@ class TestHarness:
self.collected_logs = []
self.error_count = 0
self.is_run = False
+        self.cant_rebuild = False
+        self.failed_tests = []
if self.options.benchmark:
- self.datafile = 'meson-private/meson_benchmark_setup.dat'
+ self.datafile = os.path.join(options.wd, 'meson-private/meson_benchmark_setup.dat')
else:
- self.datafile = 'meson-private/meson_test_setup.dat'
+ self.datafile = os.path.join(options.wd, 'meson-private/meson_test_setup.dat')
+
+ def rebuild_all(self):
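+        # Rebuild everything with ninja before running the tests. A
+        # non-ninja backend is tolerated (we just note that rebuilding is
+        # unavailable); a missing ninja binary or a failed build is fatal.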
+ if not os.path.isfile(os.path.join(self.options.wd, 'build.ninja')):
+ print("Only ninja backend is supported to rebuilt tests before running them.")
+ self.cant_rebuild = True
+ return True
+
+ ninja = environment.detect_ninja()
+ if not ninja:
+ print("Can't find ninja, can't rebuild test.")
+ self.cant_rebuild = True
+ return False
+
+ p = subprocess.Popen([ninja, '-C', self.options.wd])
+        p.communicate()
+
+ if p.returncode != 0:
+ print("Could not rebuild")
+ return False
+
+ return True
def run_single_test(self, wrap, test):
+        failing = False
if test.fname[0].endswith('.jar'):
cmd = ['java', '-jar'] + test.fname
elif not test.is_cross and run_with_mono(test.fname[0]):
@@ -155,6 +191,7 @@ class TestHarness:
cmd = [test.exe_runner] + test.fname
else:
cmd = test.fname
+
if cmd is None:
res = 'SKIP'
duration = 0.0
@@ -171,20 +208,30 @@ class TestHarness:
child_env.update(test.env)
if len(test.extra_paths) > 0:
child_env['PATH'] = child_env['PATH'] + ';'.join([''] + test.extra_paths)
- if is_windows():
- setsid = None
- else:
- setsid = os.setsid
+
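+        # Without --verbose, capture the child's output for the logs;
+        # with --verbose, leave stdout and stderr attached to the console.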
+ setsid = None
+ stdout = None
+ stderr = None
+ if not self.options.verbose:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT
+
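+        # On POSIX, start the test in its own session so that a timeout
+        # can kill the whole process group, not just the direct child.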
+ if not is_windows():
+ setsid = os.setsid
+
p = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT,
+ stdout=stdout,
+ stderr=stderr,
env=child_env,
cwd=test.workdir,
preexec_fn=setsid)
timed_out = False
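+        # Scale the stored per-test timeout by --timeout-multiplier.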
+ timeout = test.timeout * self.options.timeout_multiplier
try:
- (stdo, stde) = p.communicate(timeout=test.timeout)
+ (stdo, stde) = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
+ if self.options.verbose:
+ print("%s time out (After %d seconds)" % (test.name, timeout))
timed_out = True
# Python does not provide multiplatform support for
# killing a process and all its children so we need
@@ -201,13 +248,22 @@ class TestHarness:
stde = decode(stde)
if timed_out:
res = 'TIMEOUT'
+            failing = True
+ if p.returncode == GNU_SKIP_RETURNCODE:
+ res = 'SKIP'
elif (not test.should_fail and p.returncode == 0) or \
(test.should_fail and p.returncode != 0):
res = 'OK'
else:
res = 'FAIL'
+            failing = True
returncode = p.returncode
- return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+ result = TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+
+        if failing:
+ self.failed_tests.append(result)
+
+ return result
def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
startpad = ' '*(numlen - len('%d' % (i+1)))
@@ -218,7 +274,8 @@ class TestHarness:
(num, name, padding1, result.res, padding2, result.duration)
print(result_str)
result_str += "\n\n" + result.get_log()
- if (result.returncode != 0) != result.should_fail:
+ if (result.returncode != GNU_SKIP_RETURNCODE) and \
+ (result.returncode != 0) != result.should_fail:
self.error_count += 1
if self.options.print_errorlogs:
self.collected_logs.append(result_str)
@@ -232,7 +289,7 @@ class TestHarness:
print('Test data file not found. Probably this means that you did not run this in the build directory.')
return 1
self.is_run = True
- logfilename = self.run_tests(self.datafile, self.options.logbase)
+ logfilename = self.run_tests(self.options.logbase)
if len(self.collected_logs) > 0:
if len(self.collected_logs) > 10:
print('\nThe output from the first 10 failed tests:\n')
@@ -249,8 +306,16 @@ class TestHarness:
print('Full log written to %s.' % logfilename)
return self.error_count
- def run_tests(self, datafilename, log_base):
- logfile_base = os.path.join('meson-logs', log_base)
+ def get_tests(self):
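+        # Load the test definitions that meson serialized into
+        # meson-private at configure time.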
+ with open(self.datafile, 'rb') as f:
+ tests = pickle.load(f)
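+        # Mark every test as not yet rebuilt; presumably groundwork for
+        # rebuilding individual tests on demand.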
+ for test in tests:
+ test.rebuilt = False
+
+ return tests
+
+ def run_tests(self, log_base):
+ logfile_base = os.path.join(self.options.wd, 'meson-logs', log_base)
if self.options.wrapper is None:
wrap = []
logfilename = logfile_base + '.txt'
@@ -260,8 +325,7 @@ class TestHarness:
namebase = wrap[0]
logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
- with open(datafilename, 'rb') as f:
- tests = pickle.load(f)
+ tests = self.get_tests()
if len(tests) == 0:
print('No tests defined.')
return
@@ -270,37 +334,67 @@ class TestHarness:
futures = []
filtered_tests = filter_tests(self.options.suite, tests)
- with open(jsonlogfilename, 'w') as jsonlogfile, \
- open(logfilename, 'w') as logfile:
- logfile.write('Log of Meson test suite run on %s.\n\n' %
- datetime.datetime.now().isoformat())
- for i, test in enumerate(filtered_tests):
- if test.suite[0] == '':
- visible_name = test.name
- else:
- if self.options.suite is not None:
- visible_name = self.options.suite + ' / ' + test.name
+ jsonlogfile = None
+ logfile = None
+ try:
+ if not self.options.verbose:
+ jsonlogfile = open(jsonlogfilename, 'w')
+ logfile = open(logfilename, 'w')
+ logfile.write('Log of Meson test suite run on %s.\n\n' %
+ datetime.datetime.now().isoformat())
+
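+            # With --repeat, run the whole filtered test list the
+            # requested number of times.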
+            for _ in range(self.options.repeat):
+ for i, test in enumerate(filtered_tests):
+ if test.suite[0] == '':
+ visible_name = test.name
else:
- visible_name = test.suite[0] + ' / ' + test.name
-
- if not test.is_parallel:
- self.drain_futures(futures)
- futures = []
- res = self.run_single_test(wrap, test)
- self.print_stats(numlen, filtered_tests, visible_name, res, i,
- logfile, jsonlogfile)
- else:
- f = executor.submit(self.run_single_test, wrap, test)
- futures.append((f, numlen, filtered_tests, visible_name, i,
- logfile, jsonlogfile))
- self.drain_futures(futures)
+ if self.options.suite is not None:
+ visible_name = self.options.suite + ' / ' + test.name
+ else:
+ visible_name = test.suite[0] + ' / ' + test.name
+
+ if not test.is_parallel:
+ self.drain_futures(futures)
+ futures = []
+ res = self.run_single_test(wrap, test)
+ if not self.options.verbose:
+ self.print_stats(numlen, filtered_tests, visible_name, res, i,
+ logfile, jsonlogfile)
+ else:
+ f = executor.submit(self.run_single_test, wrap, test)
+ if not self.options.verbose:
+ futures.append((f, numlen, filtered_tests, visible_name, i,
+ logfile, jsonlogfile))
+ self.drain_futures(futures, logfile, jsonlogfile)
+ finally:
+ if jsonlogfile:
+ jsonlogfile.close()
+ if logfile:
+ logfile.close()
+
return logfilename
- def drain_futures(self, futures):
+ def drain_futures(self, futures, logfile, jsonlogfile):
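+        # Collect results from the queued parallel tests; with --repeat,
+        # cancel whatever is still pending once a failure has been seen.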
for i in futures:
(result, numlen, tests, name, i, logfile, jsonlogfile) = i
- self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
+ if self.options.repeat > 1 and self.failed_tests:
+ result.cancel()
+ elif not self.options.verbose:
+ self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
+ else:
+ result.result()
+
+ if self.options.repeat > 1 and self.failed_tests:
+ if not self.options.verbose:
+ for res in self.failed_tests:
+ print('Test failed:\n\n-- stdout --\n')
+ print(res.stdo)
+ print('\n-- stderr --\n')
+ print(res.stde)
+ return 1
+
+ return
def run_special(self):
'Tests run by the user, usually something like "under gdb 1000 times".'
@@ -315,7 +409,7 @@ class TestHarness:
return 1
if os.path.isfile('build.ninja'):
subprocess.check_call([environment.detect_ninja(), 'all'])
- tests = pickle.load(open(self.datafile, 'rb'))
+ tests = self.get_tests()
if self.options.list:
for i in tests:
print(i.name)
@@ -325,15 +419,22 @@ class TestHarness:
for i in range(self.options.repeat):
print('Running: %s %d/%d' % (t.name, i+1, self.options.repeat))
if self.options.gdb:
- gdbrun(t)
+ wrap = ['gdb', '--quiet']
+ if len(t.cmd_args) > 0:
+ wrap.append('--args')
+ if self.options.repeat > 1:
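+                        # With --repeat, run gdb non-interactively: start
+                        # the program and quit as soon as it exits.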
+                        wrap.extend(['-ex', 'run', '-ex', 'quit'])
+
+ res = self.run_single_test(wrap, t)
else:
res = self.run_single_test(wrap, t)
if (res.returncode == 0 and res.should_fail) or \
- (res.returncode != 0 and not res.should_fail):
- print('Test failed:\n\n-- stdout --\n')
- print(res.stdo)
- print('\n-- stderr --\n')
- print(res.stde)
+ (res.returncode != 0 and not res.should_fail):
+ if not self.options.verbose:
+ print('Test failed:\n\n-- stdout --\n')
+ print(res.stdo)
+ print('\n-- stderr --\n')
+ print(res.stde)
return 1
return 0
@@ -342,32 +443,24 @@ def filter_tests(suite, tests):
return tests
return [x for x in tests if suite in x.suite]
-def gdbrun(test):
- child_env = os.environ.copy()
- child_env.update(test.env)
- # On success will exit cleanly. On failure gdb will ask user
- # if they really want to exit.
- exe = test.fname
- args = test.cmd_args
- if len(args) > 0:
- argset = ['-ex', 'set args ' + ' '.join(args)]
- else:
- argset = []
- cmd = ['gdb', '--quiet'] + argset + ['-ex', 'run', '-ex', 'quit'] + exe
- # FIXME a ton of stuff. run_single_test grabs stdout & co,
- # which we do not want to do when running under gdb.
- p = subprocess.Popen(cmd,
- env=child_env,
- cwd=test.workdir,
- )
- p.communicate()
def run(args):
options = parser.parse_args(args)
if options.benchmark:
options.num_processes = 1
+
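+    # gdb is interactive, so imply --verbose to keep the test's stdout
+    # and stderr unredirected.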
+ if options.gdb:
+ options.verbose = True
+
+ options.wd = os.path.abspath(options.wd)
+
th = TestHarness(options)
- if len(options.args) == 0:
+ if options.list:
+ return th.run_special()
+ if not options.no_rebuild:
+ if not th.rebuild_all():
+ return -1
+    if len(options.args) == 0:
return th.doit()
return th.run_special()