path: root/mesontest.py
author     Jussi Pakkanen <jpakkane@gmail.com>  2016-11-18 21:31:49 +0200
committer  Jussi Pakkanen <jpakkane@gmail.com>  2016-11-18 22:10:22 +0200
commit     c7ddce163fb8f440c96649a3319ab9f1f62e38be (patch)
tree       70a4778f2a8c2f67ccda1317234143b9b7f225b1 /mesontest.py
parent     39df22bf539ce3359679643256b854f34c13fa16 (diff)
All testing is now in mesontest.py, which simplifies a lot of stuff.
Diffstat (limited to 'mesontest.py')
-rwxr-xr-x  mesontest.py  294
1 file changed, 285 insertions(+), 9 deletions(-)
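
The commit makes mesontest.py the single entry point for running tests and benchmarks. A rough usage sketch based on the options added in the patch below (the build directory, suite name and thread count are illustrative, not taken from the commit):

    $ cd builddir
    $ mesontest.py                                 # run all tests in parallel
    $ mesontest.py --suite unit --print-errorlogs  # run one suite, print logs of failures
    $ mesontest.py --benchmark --logbase benchlog  # benchmarks are forced to one process
    $ MESON_TESTTHREADS=4 mesontest.py             # override the detected worker count

The default worker count comes from determine_worker_count() in the diff: the MESON_TESTTHREADS environment variable wins if set, otherwise multiprocessing.cpu_count() is used, with a fallback to 1.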
diff --git a/mesontest.py b/mesontest.py
index 3a2b0a3..135d463 100755
--- a/mesontest.py
+++ b/mesontest.py
@@ -18,19 +18,291 @@
 import subprocess, sys, os, argparse
 import pickle
-from mesonbuild.scripts import meson_test, meson_benchmark
+import mesonbuild
+from mesonbuild import build
 from mesonbuild import environment
+import time, datetime, multiprocessing, json
+import concurrent.futures as conc
+import platform
+import signal
+
+def is_windows():
+    platname = platform.system().lower()
+    return platname == 'windows' or 'mingw' in platname
+
+def determine_worker_count():
+    varname = 'MESON_TESTTHREADS'
+    if varname in os.environ:
+        try:
+            num_workers = int(os.environ[varname])
+        except ValueError:
+            print('Invalid value in %s, using 1 thread.' % varname)
+            num_workers = 1
+    else:
+        try:
+            # Fails in some weird environments such as Debian
+            # reproducible build.
+            num_workers = multiprocessing.cpu_count()
+        except Exception:
+            num_workers = 1
+    return num_workers
+
 parser = argparse.ArgumentParser()
 parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                     help='Number of times to run the tests.')
-parser.add_argument('--wrapper', default='', dest='wrapper',
-                    help='Exe wrapper (such as Valgrind) to use')
 parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                     help='Run test under gdb.')
 parser.add_argument('--list', default=False, dest='list', action='store_true',
                     help='List available tests.')
-parser.add_argument('tests', nargs='*')
+parser.add_argument('--wrapper', default=None, dest='wrapper',
+                    help='Wrapper to run tests with (e.g. Valgrind).')
+parser.add_argument('--wd', default=None, dest='wd',
+                    help='Directory to cd into before running tests.')
+parser.add_argument('--suite', default=None, dest='suite',
+                    help='Only run tests belonging to the given suite.')
+parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
+                    help='Do not split stderr and stdout in test logs.')
+parser.add_argument('--print-errorlogs', default=False, action='store_true',
+                    help="Whether to print failing tests' logs.")
+parser.add_argument('--benchmark', default=False, action='store_true',
+                    help="Run benchmarks instead of tests.")
+parser.add_argument('--logbase', default='testlog',
+                    help="Base name for log file.")
+parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
+                    help='How many parallel processes to use.')
+parser.add_argument('args', nargs='*')
+
+class TestRun:
+    def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
+                 env):
+        self.res = res
+        self.returncode = returncode
+        self.duration = duration
+        self.stdo = stdo
+        self.stde = stde
+        self.cmd = cmd
+        self.env = env
+        self.should_fail = should_fail
+
+    def get_log(self):
+        res = '--- command ---\n'
+        if self.cmd is None:
+            res += 'NONE\n'
+        else:
+            res += '\n%s %s\n' % (
+                ' '.join(["%s='%s'" % (k, v) for k, v in self.env.items()]),
+                ' '.join(self.cmd))
+        if self.stdo:
+            res += '--- stdout ---\n'
+            res += self.stdo
+        if self.stde:
+            if res[-1:] != '\n':
+                res += '\n'
+            res += '--- stderr ---\n'
+            res += self.stde
+        if res[-1:] != '\n':
+            res += '\n'
+        res += '-------\n\n'
+        return res
+
+def decode(stream):
+    try:
+        return stream.decode('utf-8')
+    except UnicodeDecodeError:
+        return stream.decode('iso-8859-1', errors='ignore')
+
+def write_json_log(jsonlogfile, test_name, result):
+    jresult = {'name' : test_name,
+               'stdout' : result.stdo,
+               'result' : result.res,
+               'duration' : result.duration,
+               'returncode' : result.returncode,
+               'command' : result.cmd,
+               'env' : result.env}
+    if result.stde:
+        jresult['stderr'] = result.stde
+    jsonlogfile.write(json.dumps(jresult) + '\n')
+
+def run_with_mono(fname):
+    if fname.endswith('.exe') and not is_windows():
+        return True
+    return False
+
+class TestHarness:
+    def __init__(self, options):
+        self.options = options
+        self.collected_logs = []
+        self.error_count = 0
+        self.is_run = False
+
+    def run_single_test(self, wrap, test):
+        if test.fname[0].endswith('.jar'):
+            cmd = ['java', '-jar'] + test.fname
+        elif not test.is_cross and run_with_mono(test.fname[0]):
+            cmd = ['mono'] + test.fname
+        else:
+            if test.is_cross:
+                if test.exe_runner is None:
+                    # Cannot run the test on a cross-compiled executable
+                    # because there is no exe wrapper defined.
+                    cmd = None
+                else:
+                    cmd = [test.exe_runner] + test.fname
+            else:
+                cmd = test.fname
+        if cmd is None:
+            res = 'SKIP'
+            duration = 0.0
+            stdo = 'Not run because cross-compiled binaries cannot be executed.'
+            stde = None
+            returncode = -1
+        else:
+            cmd = wrap + cmd + test.cmd_args
+            starttime = time.time()
+            child_env = os.environ.copy()
+            if isinstance(test.env, build.EnvironmentVariables):
+                test.env = test.env.get_env(child_env)
+
+            child_env.update(test.env)
+            if len(test.extra_paths) > 0:
+                # Use the platform's path separator rather than a hardcoded ';'.
+                child_env['PATH'] += os.pathsep + os.pathsep.join(test.extra_paths)
+            if is_windows():
+                setsid = None
+            else:
+                setsid = os.setsid
+            p = subprocess.Popen(cmd,
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT,
+                                 env=child_env,
+                                 cwd=test.workdir,
+                                 preexec_fn=setsid)
+            timed_out = False
+            try:
+                (stdo, stde) = p.communicate(timeout=test.timeout)
+            except subprocess.TimeoutExpired:
+                timed_out = True
+                # Python does not provide multiplatform support for
+                # killing a process and all its children so we need
+                # to roll our own.
+                if is_windows():
+                    subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
+                else:
+                    os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+                (stdo, stde) = p.communicate()
+            endtime = time.time()
+            duration = endtime - starttime
+            stdo = decode(stdo)
+            if stde:
+                stde = decode(stde)
+            if timed_out:
+                res = 'TIMEOUT'
+            elif (not test.should_fail and p.returncode == 0) or \
+                 (test.should_fail and p.returncode != 0):
+                res = 'OK'
+            else:
+                res = 'FAIL'
+            returncode = p.returncode
+        return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+
+    def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
+        startpad = ' ' * (numlen - len('%d' % (i + 1)))
+        num = '%s%d/%d' % (startpad, i + 1, len(tests))
+        padding1 = ' ' * (38 - len(name))
+        padding2 = ' ' * (8 - len(result.res))
+        result_str = '%s %s %s%s%s%5.2f s' % \
+            (num, name, padding1, result.res, padding2, result.duration)
+        print(result_str)
+        result_str += "\n\n" + result.get_log()
+        if (result.returncode != 0) != result.should_fail:
+            self.error_count += 1
+            if self.options.print_errorlogs:
+                self.collected_logs.append(result_str)
+        logfile.write(result_str)
+        write_json_log(jsonlogfile, name, result)
+
+    def doit(self):
+        if self.options.benchmark:
+            datafile = 'meson-private/meson_benchmark_setup.dat'
+        else:
+            datafile = 'meson-private/meson_test_setup.dat'
+        if self.is_run:
+            raise RuntimeError('Test harness object can only be used once.')
+        self.is_run = True
+        logfilename = self.run_tests(datafile, self.options.logbase)
+        if len(self.collected_logs) > 0:
+            if len(self.collected_logs) > 10:
+                print('\nThe output from the first 10 failed tests:\n')
+            else:
+                print('\nThe output from the failed tests:\n')
+            for log in self.collected_logs[:10]:
+                lines = log.splitlines()
+                if len(lines) > 100:
+                    print(lines[0])
+                    print('--- Listing only the last 100 lines from a long log. ---')
+                    lines = lines[-99:]
+                for line in lines:
+                    print(line)
+        print('Full log written to %s.' % logfilename)
+        return self.error_count
+
+    def run_tests(self, datafilename, log_base):
+        logfile_base = os.path.join('meson-logs', log_base)
+        if self.options.wrapper is None:
+            wrap = []
+            logfilename = logfile_base + '.txt'
+            jsonlogfilename = logfile_base + '.json'
+        else:
+            wrap = self.options.wrapper.split()
+            namebase = wrap[0]
+            logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
+            jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
+        with open(datafilename, 'rb') as f:
+            tests = pickle.load(f)
+        if len(tests) == 0:
+            print('No tests defined.')
+            return
+        numlen = len('%d' % len(tests))
+        executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
+        futures = []
+        filtered_tests = filter_tests(self.options.suite, tests)
+
+        with open(jsonlogfilename, 'w') as jsonlogfile, \
+             open(logfilename, 'w') as logfile:
+            logfile.write('Log of Meson test suite run on %s.\n\n' %
+                          datetime.datetime.now().isoformat())
+            for i, test in enumerate(filtered_tests):
+                if test.suite[0] == '':
+                    visible_name = test.name
+                else:
+                    if self.options.suite is not None:
+                        visible_name = self.options.suite + ' / ' + test.name
+                    else:
+                        visible_name = test.suite[0] + ' / ' + test.name
+
+                if not test.is_parallel:
+                    self.drain_futures(futures)
+                    futures = []
+                    res = self.run_single_test(wrap, test)
+                    self.print_stats(numlen, filtered_tests, visible_name, res, i,
+                                     logfile, jsonlogfile)
+                else:
+                    f = executor.submit(self.run_single_test, wrap, test)
+                    futures.append((f, numlen, filtered_tests, visible_name, i,
+                                    logfile, jsonlogfile))
+            self.drain_futures(futures)
+        return logfilename
+
+    def drain_futures(self, futures):
+        for item in futures:
+            (result, numlen, tests, name, i, logfile, jsonlogfile) = item
+            self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
+
+def filter_tests(suite, tests):
+    if suite is None:
+        return tests
+    return [x for x in tests if suite in x.suite]
 
 def gdbrun(test):
     child_env = os.environ.copy()
@@ -53,15 +325,19 @@ def gdbrun(test):
     p.communicate()
 
 def run(args):
-    datafile = 'meson-private/meson_test_setup.dat'
+    options = parser.parse_args(args)
+    if options.benchmark:
+        options.num_processes = 1
+    th = TestHarness(options)
+    return th.doit()
     if not os.path.isfile(datafile):
         print('Test data file not found. Probably this means that you did not run this in the build directory.')
         return 1
+    datafile = os.path.join(os.curdir, datafile)
     if os.path.isfile('build.ninja'):
         subprocess.check_call([environment.detect_ninja(), 'all'])
-    if len(args) > 0 and args[0] == '--benchmark':
-        return meson_benchmark.run(args[1:] + ['meson-private/meson_benchmark_setup.dat'])
-    options = parser.parse_args(args)
+    if options.wd is not None:
+        os.chdir(options.wd)
     if len(options.tests) == 0:
         # Run basic tests.
         return meson_test.run(args + ['meson-private/meson_test_setup.dat'])
@@ -84,7 +360,7 @@ def run(args):
         if options.gdb:
             gdbrun(t)
         else:
-            res = meson_test.run_single_test(wrap, t)
+            res = run_single_test(wrap, t)
             if (res.returncode == 0 and res.should_fail) or \
                (res.returncode != 0 and not res.should_fail):
                 print(res.stdo)
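
For reference, write_json_log() above writes one JSON object per line to meson-logs/<logbase>.json alongside the plain-text log. A record for a passing test would look roughly like this (values are illustrative; the stderr key appears only when stderr was captured separately, which is the default unless --no-stdsplit is given, and only when it is non-empty):

    {"name": "sometest", "stdout": "...", "result": "OK", "duration": 0.05,
     "returncode": 0, "command": ["/path/to/sometest"], "env": {}}

The result field takes the values OK, FAIL, TIMEOUT or SKIP, matching the outcomes assigned in run_single_test().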