author     Jussi Pakkanen <jpakkane@gmail.com>   2016-11-18 20:32:41 +0200
committer  Jussi Pakkanen <jpakkane@gmail.com>   2016-11-18 22:08:07 +0200
commit     39df22bf539ce3359679643256b854f34c13fa16 (patch)
tree       be980cbd72c9c04369bf9516222244d7b137adf8
parent     e24229eae718c8b177074de27b30fdcfb26ff4a6 (diff)
Made Meson test into a class rather than abusing global variables, in preparation for moving it elsewhere.
-rwxr-xr-x  mesonbuild/scripts/meson_test.py  352
-rwxr-xr-x  mesontest.py                        8
2 files changed, 181 insertions(+), 179 deletions(-)
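
The diff below moves the runner's module-level state (collected_logs, error_count, options) into a TestHarness class whose methods receive that state through self. As an illustrative aside (simplified names, not the commit's exact code), the refactoring pattern in miniature:

import argparse

# Sketch only: globals that were mutated via "global ..." statements
# become instance attributes, and the functions that touched them
# become methods, so repeated in-process runs no longer leak state.
class Harness:
    def __init__(self, options):
        self.options = options       # parsed argparse.Namespace
        self.collected_logs = []     # was a module-level list
        self.error_count = 0         # was a module-level counter

    def record(self, log_text, failed):
        if failed:
            self.error_count += 1
            if self.options.print_errorlogs:
                self.collected_logs.append(log_text)
        return self.error_count

if __name__ == '__main__':
    p = argparse.ArgumentParser()
    p.add_argument('--print-errorlogs', default=False, action='store_true')
    h = Harness(p.parse_args(['--print-errorlogs']))
    print(h.record('some failing log', failed=True))   # prints 1

The real class in the diff additionally owns the test data file path and the run loop (doit, run_tests), which is what the commit message means by preparation for moving it elsewhere.
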
diff --git a/mesonbuild/scripts/meson_test.py b/mesonbuild/scripts/meson_test.py
index 5538d84..42f2c8e 100755
--- a/mesonbuild/scripts/meson_test.py
+++ b/mesonbuild/scripts/meson_test.py
@@ -26,9 +26,22 @@ def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
-collected_logs = []
-error_count = 0
-options = None
+def determine_worker_count():
+ varname = 'MESON_TESTTHREADS'
+ if varname in os.environ:
+ try:
+ num_workers = int(os.environ[varname])
+ except ValueError:
+ print('Invalid value in %s, using 1 thread.' % varname)
+ num_workers = 1
+ else:
+ try:
+ # Fails in some weird environments such as Debian
+ # reproducible build.
+ num_workers = multiprocessing.cpu_count()
+ except Exception:
+ num_workers = 1
+ return num_workers
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
@@ -43,7 +56,7 @@ parser.add_argument('--print-errorlogs', default=False, action='store_true',
help="Whether to print faling tests' logs.")
parser.add_argument('--logbase', default='testlog',
help="Base name for log file.")
-parser.add_argument('--num-processes', default=None,
+parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
help='How many parallel processes to use.')
parser.add_argument('args', nargs='*')
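
Because --num-processes now defaults to determine_worker_count(), parallelism comes from the MESON_TESTTHREADS environment variable when it is set, and from the CPU count otherwise, with 1 as the fallback in either error case. A standalone restatement of that lookup, handy for experimenting outside the runner:

import multiprocessing, os

def worker_count():
    value = os.environ.get('MESON_TESTTHREADS')
    if value is not None:
        try:
            return int(value)
        except ValueError:
            print('Invalid value in MESON_TESTTHREADS, using 1 thread.')
            return 1
    try:
        # cpu_count() can fail in unusual environments, e.g. the Debian
        # reproducible-build setup mentioned in the hunk above.
        return multiprocessing.cpu_count()
    except Exception:
        return 1

os.environ['MESON_TESTTHREADS'] = '4'
print(worker_count())   # 4
del os.environ['MESON_TESTTHREADS']
print(worker_count())   # CPU count of this machine
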
@@ -104,201 +117,184 @@ def run_with_mono(fname):
return True
return False
-def run_single_test(wrap, test):
- global options
- if test.fname[0].endswith('.jar'):
- cmd = ['java', '-jar'] + test.fname
- elif not test.is_cross and run_with_mono(test.fname[0]):
- cmd = ['mono'] + test.fname
- else:
- if test.is_cross:
- if test.exe_runner is None:
- # Can not run test on cross compiled executable
- # because there is no execute wrapper.
- cmd = None
+class TestHarness:
+ def __init__(self, options):
+ self.options = options
+ self.collected_logs = []
+ self.error_count = 0
+ self.datafile = options.args[0]
+
+ def run_single_test(self, wrap, test):
+ if test.fname[0].endswith('.jar'):
+ cmd = ['java', '-jar'] + test.fname
+ elif not test.is_cross and run_with_mono(test.fname[0]):
+ cmd = ['mono'] + test.fname
+ else:
+ if test.is_cross:
+ if test.exe_runner is None:
+ # Can not run test on cross compiled executable
+ # because there is no execute wrapper.
+ cmd = None
+ else:
+ cmd = [test.exe_runner] + test.fname
else:
- cmd = [test.exe_runner] + test.fname
+ cmd = test.fname
+ if cmd is None:
+ res = 'SKIP'
+ duration = 0.0
+ stdo = 'Not run because can not execute cross compiled binaries.'
+ stde = None
+ returncode = -1
else:
- cmd = test.fname
- if cmd is None:
- res = 'SKIP'
- duration = 0.0
- stdo = 'Not run because can not execute cross compiled binaries.'
- stde = None
- returncode = -1
- else:
- cmd = wrap + cmd + test.cmd_args
- starttime = time.time()
- child_env = os.environ.copy()
- if isinstance(test.env, build.EnvironmentVariables):
- test.env = test.env.get_env(child_env)
+ cmd = wrap + cmd + test.cmd_args
+ starttime = time.time()
+ child_env = os.environ.copy()
+ if isinstance(test.env, build.EnvironmentVariables):
+ test.env = test.env.get_env(child_env)
- child_env.update(test.env)
- if len(test.extra_paths) > 0:
- child_env['PATH'] = (child_env['PATH'] +
- os.pathsep.join([''] + test.extra_paths))
- if is_windows():
- setsid = None
- else:
- setsid = os.setsid
- p = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE if options and options.split else subprocess.STDOUT,
- env=child_env,
- cwd=test.workdir,
- preexec_fn=setsid)
- timed_out = False
- try:
- (stdo, stde) = p.communicate(timeout=test.timeout)
- except subprocess.TimeoutExpired:
- timed_out = True
- # Python does not provide multiplatform support for
- # killing a process and all its children so we need
- # to roll our own.
+ child_env.update(test.env)
+ if len(test.extra_paths) > 0:
+ child_env['PATH'] = child_env['PATH'] + os.pathsep.join([''] + test.extra_paths)
if is_windows():
- subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
+ setsid = None
else:
- os.killpg(os.getpgid(p.pid), signal.SIGKILL)
- (stdo, stde) = p.communicate()
- endtime = time.time()
- duration = endtime - starttime
- stdo = decode(stdo)
- if stde:
- stde = decode(stde)
- if timed_out:
- res = 'TIMEOUT'
- elif (not test.should_fail and p.returncode == 0) or \
- (test.should_fail and p.returncode != 0):
- res = 'OK'
+ setsid = os.setsid
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT,
+ env=child_env,
+ cwd=test.workdir,
+ preexec_fn=setsid)
+ timed_out = False
+ try:
+ (stdo, stde) = p.communicate(timeout=test.timeout)
+ except subprocess.TimeoutExpired:
+ timed_out = True
+ # Python does not provide multiplatform support for
+ # killing a process and all its children so we need
+ # to roll our own.
+ if is_windows():
+ subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
+ else:
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+ (stdo, stde) = p.communicate()
+ endtime = time.time()
+ duration = endtime - starttime
+ stdo = decode(stdo)
+ if stde:
+ stde = decode(stde)
+ if timed_out:
+ res = 'TIMEOUT'
+ elif (not test.should_fail and p.returncode == 0) or \
+ (test.should_fail and p.returncode != 0):
+ res = 'OK'
+ else:
+ res = 'FAIL'
+ returncode = p.returncode
+ return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+
+ def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
+ startpad = ' '*(numlen - len('%d' % (i+1)))
+ num = '%s%d/%d' % (startpad, i+1, len(tests))
+ padding1 = ' '*(38-len(name))
+ padding2 = ' '*(8-len(result.res))
+ result_str = '%s %s %s%s%s%5.2f s' % \
+ (num, name, padding1, result.res, padding2, result.duration)
+ print(result_str)
+ result_str += "\n\n" + result.get_log()
+ if (result.returncode != 0) != result.should_fail:
+ self.error_count += 1
+ if self.options.print_errorlogs:
+ self.collected_logs.append(result_str)
+ logfile.write(result_str)
+ write_json_log(jsonlogfile, name, result)
+
+ def doit(self):
+ datafilename = self.options.args[0]
+ logfilename = self.run_tests(datafilename, self.options.logbase)
+ if len(self.collected_logs) > 0:
+ if len(self.collected_logs) > 10:
+ print('\nThe output from the first 10 failed tests:\n')
+ else:
+ print('\nThe output from the failed tests:\n')
+ for log in self.collected_logs[:10]:
+ lines = log.splitlines()
+ if len(lines) > 100:
+ print(lines[0])
+ print('--- Listing only the last 100 lines from a long log. ---')
+ lines = lines[-99:]
+ for line in lines:
+ print(line)
+ print('Full log written to %s.' % logfilename)
+ return self.error_count
+
+ def run_tests(self, datafilename, log_base):
+ logfile_base = os.path.join('meson-logs', log_base)
+ if self.options.wrapper is None:
+ wrap = []
+ logfilename = logfile_base + '.txt'
+ jsonlogfilename = logfile_base + '.json'
else:
- res = 'FAIL'
- returncode = p.returncode
- return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
+ wrap = self.options.wrapper.split()
+ namebase = wrap[0]
+ logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
+ jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
+ with open(datafilename, 'rb') as f:
+ tests = pickle.load(f)
+ if len(tests) == 0:
+ print('No tests defined.')
+ return
+ numlen = len('%d' % len(tests))
+ executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
+ futures = []
+ filtered_tests = filter_tests(self.options.suite, tests)
-def print_stats(numlen, tests, name, result, i, logfile, jsonlogfile):
- global collected_logs, error_count, options
- startpad = ' '*(numlen - len('%d' % (i+1)))
- num = '%s%d/%d' % (startpad, i+1, len(tests))
- padding1 = ' '*(38-len(name))
- padding2 = ' '*(8-len(result.res))
- result_str = '%s %s %s%s%s%5.2f s' % \
- (num, name, padding1, result.res, padding2, result.duration)
- print(result_str)
- result_str += "\n\n" + result.get_log()
- if (result.returncode != 0) != result.should_fail:
- error_count += 1
- if options.print_errorlogs:
- collected_logs.append(result_str)
- logfile.write(result_str)
- write_json_log(jsonlogfile, name, result)
+ with open(jsonlogfilename, 'w') as jsonlogfile, \
+ open(logfilename, 'w') as logfile:
+ logfile.write('Log of Meson test suite run on %s.\n\n' %
+ datetime.datetime.now().isoformat())
+ for i, test in enumerate(filtered_tests):
+ if test.suite[0] == '':
+ visible_name = test.name
+ else:
+ if self.options.suite is not None:
+ visible_name = self.options.suite + ' / ' + test.name
+ else:
+ visible_name = test.suite[0] + ' / ' + test.name
+
+ if not test.is_parallel:
+ self.drain_futures(futures)
+ futures = []
+ res = self.run_single_test(wrap, test)
+ self.print_stats(numlen, filtered_tests, visible_name, res, i,
+ logfile, jsonlogfile)
+ else:
+ f = executor.submit(self.run_single_test, wrap, test)
+ futures.append((f, numlen, filtered_tests, visible_name, i,
+ logfile, jsonlogfile))
+ self.drain_futures(futures)
+ return logfilename
-def drain_futures(futures):
- for i in futures:
- (result, numlen, tests, name, i, logfile, jsonlogfile) = i
- print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
+
+ def drain_futures(self, futures):
+ for i in futures:
+ (result, numlen, tests, name, i, logfile, jsonlogfile) = i
+ self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
def filter_tests(suite, tests):
if suite is None:
return tests
return [x for x in tests if suite in x.suite]
-def determine_worker_count():
- varname = 'MESON_TESTTHREADS'
- if varname in os.environ:
- try:
- num_workers = int(os.environ[varname])
- except ValueError:
- print('Invalid value in %s, using 1 thread.' % varname)
- num_workers = 1
- else:
- try:
- # Fails in some weird environments such as Debian
- # reproducible build.
- num_workers = multiprocessing.cpu_count()
- except Exception:
- num_workers = 1
- return num_workers
-
-def run_tests(datafilename, log_base, num_workers=None):
- global options
- if num_workers is None:
- num_workers = determine_worker_count()
- else:
- num_workers = int(num_workers)
- logfile_base = os.path.join('meson-logs', log_base)
- if options.wrapper is None:
- wrap = []
- logfilename = logfile_base + '.txt'
- jsonlogfilename = logfile_base+ '.json'
- else:
- wrap = options.wrapper.split()
- namebase = wrap[0]
- logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
- jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
- with open(datafilename, 'rb') as f:
- tests = pickle.load(f)
- if len(tests) == 0:
- print('No tests defined.')
- return
- numlen = len('%d' % len(tests))
- executor = conc.ThreadPoolExecutor(max_workers=num_workers)
- futures = []
- filtered_tests = filter_tests(options.suite, tests)
-
- with open(jsonlogfilename, 'w') as jsonlogfile, \
- open(logfilename, 'w') as logfile:
- logfile.write('Log of Meson test suite run on %s.\n\n' %
- datetime.datetime.now().isoformat())
- for i, test in enumerate(filtered_tests):
- if test.suite[0] == '':
- visible_name = test.name
- else:
- if options.suite is not None:
- visible_name = options.suite + ' / ' + test.name
- else:
- visible_name = test.suite[0] + ' / ' + test.name
-
- if not test.is_parallel:
- drain_futures(futures)
- futures = []
- res = run_single_test(wrap, test)
- print_stats(numlen, filtered_tests, visible_name, res, i,
- logfile, jsonlogfile)
- else:
- f = executor.submit(run_single_test, wrap, test)
- futures.append((f, numlen, filtered_tests, visible_name, i,
- logfile, jsonlogfile))
- drain_futures(futures)
- return logfilename
-
def run(args):
- global collected_logs, error_count, options
- collected_logs = [] # To avoid state leaks when invoked multiple times (running tests in-process)
- error_count = 0
options = parser.parse_args(args)
if len(options.args) != 1:
print('Test runner for Meson. Do not run on your own, mmm\'kay?')
print('%s [data file]' % sys.argv[0])
if options.wd is not None:
os.chdir(options.wd)
- datafile = options.args[0]
- logfilename = run_tests(datafile, options.logbase, options.num_processes)
- if len(collected_logs) > 0:
- if len(collected_logs) > 10:
- print('\nThe output from 10 first failed tests:\n')
- else:
- print('\nThe output from the failed tests:\n')
- for log in collected_logs[:10]:
- lines = log.splitlines()
- if len(lines) > 100:
- print(lines[0])
- print('--- Listing only the last 100 lines from a long log. ---')
- lines = lines[-99:]
- for line in lines:
- print(line)
- if logfilename:
- print('Full log written to %s.' % logfilename)
- return error_count
+ th = TestHarness(options)
+ return th.doit()
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
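
One technique worth noting from the hunk above: on timeout the runner kills the child's whole process tree, because subprocess alone has no portable way to do that (setsid plus os.killpg on POSIX, taskkill /T on Windows). A self-contained sketch of the same pattern, with a placeholder command and timeout:

import os, signal, subprocess, sys

def run_with_timeout(cmd, timeout):
    # On POSIX, start the child in its own session so the whole process
    # group can be signalled later; Windows has no setsid, so pass None.
    setsid = None if sys.platform == 'win32' else os.setsid
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, preexec_fn=setsid)
    try:
        stdo, _ = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # Kill the child and anything it spawned.
        if sys.platform == 'win32':
            subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
        else:
            os.killpg(os.getpgid(p.pid), signal.SIGKILL)
        stdo, _ = p.communicate()
    return p.returncode, stdo

# Placeholder usage: a command that would outlive a 2 second timeout.
# print(run_with_timeout(['sleep', '10'], timeout=2))
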
diff --git a/mesontest.py b/mesontest.py
index 30302d8..3a2b0a3 100755
--- a/mesontest.py
+++ b/mesontest.py
@@ -19,6 +19,7 @@
import subprocess, sys, os, argparse
import pickle
from mesonbuild.scripts import meson_test, meson_benchmark
+from mesonbuild import environment
parser = argparse.ArgumentParser()
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
@@ -53,7 +54,12 @@ def gdbrun(test):
def run(args):
datafile = 'meson-private/meson_test_setup.dat'
- if args[0] == '--benchmark':
+ if not os.path.isfile(datafile):
+ print('Test data file not found. Probably this means that you did not run this in the build directory.')
+ return 1
+ if os.path.isfile('build.ninja'):
+ subprocess.check_call([environment.detect_ninja(), 'all'])
+ if len(args) > 0 and args[0] == '--benchmark':
return meson_benchmark.run(args[1:] + ['meson-private/meson_benchmark_setup.dat'])
options = parser.parse_args(args)
if len(options.tests) == 0:
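
Taken together, the mesontest.py changes make a run refuse to start outside a configured build directory, rebuild through ninja when a build.ninja is present, and hand --benchmark invocations to the benchmark runner. A rough standalone sketch of that flow, substituting shutil.which for the environment.detect_ninja() call used above:

import os, shutil, subprocess, sys

def run(args):
    datafile = 'meson-private/meson_test_setup.dat'
    if not os.path.isfile(datafile):
        # Not a configured build directory, so there is nothing to run.
        print('Test data file not found. Run this from the build directory.')
        return 1
    if os.path.isfile('build.ninja'):
        # Rebuild first so the test executables are up to date.
        ninja = shutil.which('ninja') or shutil.which('ninja-build')
        subprocess.check_call([ninja, 'all'])
    if args and args[0] == '--benchmark':
        print('Would dispatch to the benchmark runner here.')
        return 0
    print('Would run the ordinary test suite here.')
    return 0

if __name__ == '__main__':
    sys.exit(run(sys.argv[1:]))
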