Diffstat (limited to 'mesonbuild')
-rwxr-xr-x  mesonbuild/scripts/meson_benchmark.py  79
-rwxr-xr-x  mesonbuild/scripts/meson_test.py       40
2 files changed, 30 insertions(+), 89 deletions(-)
diff --git a/mesonbuild/scripts/meson_benchmark.py b/mesonbuild/scripts/meson_benchmark.py
index 9029c21..553fc89 100755
--- a/mesonbuild/scripts/meson_benchmark.py
+++ b/mesonbuild/scripts/meson_benchmark.py
@@ -14,86 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import subprocess, sys, os, argparse
-import pickle, statistics, json
+import sys
from . import meson_test
-parser = argparse.ArgumentParser()
-parser.add_argument('--wd', default=None, dest='wd',
-                    help='directory to cd into before running')
-parser.add_argument('args', nargs='+')
-
-def print_stats(numlen, num_tests, name, res, i, duration, stdev):
-    startpad = ' '*(numlen - len('%d' % (i+1)))
-    num = '%s%d/%d' % (startpad, i+1, num_tests)
-    padding1 = ' '*(38-len(name))
-    padding2 = ' '*(8-len(res))
-    result_str = '%s %s %s%s%s%5.5f s +- %5.5f s' % \
-        (num, name, padding1, res, padding2, duration, stdev)
-    print(result_str)
-# write_json_log(jsonlogfile, name, result)
-
-def print_json_log(jsonlogfile, rawruns, test_name, i):
-    jsonobj = {'name' : test_name}
-    runs = []
-    for r in rawruns:
-        runobj = {'duration': r.duration,
-                  'stdout': r.stdo,
-                  'returncode' : r.returncode,
-                  'duration' : r.duration}
-        if r.stde:
-            runobj['stderr'] = r.stde
-        runs.append(runobj)
-    jsonobj['runs'] = runs
-    jsonlogfile.write(json.dumps(jsonobj) + '\n')
-    jsonlogfile.flush()
-
-def run_benchmarks(options, datafile):
-    failed_tests = 0
-    logfile_base = 'meson-logs/benchmarklog'
-    jsonlogfilename = logfile_base + '.json'
-    with open(datafile, 'rb') as f:
-        tests = pickle.load(f)
-    num_tests = len(tests)
-    if num_tests == 0:
-        print('No benchmarks defined.')
-        return 0
-    iteration_count = 5
-    wrap = [] # Benchmarks on cross builds are pointless so don't support them.
-    with open(jsonlogfilename, 'w') as jsonlogfile:
-        for i, test in enumerate(tests):
-            runs = []
-            durations = []
-            failed = False
-            for _ in range(iteration_count):
-                res = meson_test.run_single_test(wrap, test)
-                runs.append(res)
-                durations.append(res.duration)
-                if res.returncode != 0:
-                    failed = True
-            mean = statistics.mean(durations)
-            stddev = statistics.stdev(durations)
-            if failed:
-                resultstr = 'FAIL'
-                failed_tests += 1
-            else:
-                resultstr = 'OK'
-            print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
-            print_json_log(jsonlogfile, runs, test.name, i)
-    print('\nFull log written to meson-logs/benchmarklog.json.')
-    return failed_tests
-
def run(args):
-    global failed_tests
-    options = parser.parse_args(args)
-    if len(options.args) != 1:
-        print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
-        print('%s [data file]' % sys.argv[0])
-    if options.wd is not None:
-        os.chdir(options.wd)
-    datafile = options.args[0]
-    returncode = run_benchmarks(options, datafile)
-    return returncode
+    return meson_test.run(args + ['--logbase', 'benchmarklog', '--num-processes=1'])
if __name__ == '__main__':
    sys.exit(run(sys.argv[1:]))
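
With this change meson_benchmark.py becomes a thin wrapper over the shared test runner: it forwards its arguments unchanged and pins two benchmark-specific defaults, a 'benchmarklog' log base and serial execution so timings are not perturbed by parallel load. A minimal sketch of the resulting call, assuming the module layout shown in this diff; the data file name is hypothetical:

    from mesonbuild.scripts import meson_benchmark, meson_test

    # Wrapper call; 'benchmark_setup.dat' is a hypothetical pickled data file.
    rc = meson_benchmark.run(['benchmark_setup.dat'])

    # Equivalent direct call to the shared runner:
    rc = meson_test.run(['benchmark_setup.dat',
                         '--logbase', 'benchmarklog', '--num-processes=1'])
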
diff --git a/mesonbuild/scripts/meson_test.py b/mesonbuild/scripts/meson_test.py
index 3d0d957..24b8bac 100755
--- a/mesonbuild/scripts/meson_test.py
+++ b/mesonbuild/scripts/meson_test.py
@@ -41,6 +41,10 @@ parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_f
                    help='Do not split stderr and stdout in test logs.')
parser.add_argument('--print-errorlogs', default=False, action='store_true',
                    help="Whether to print failing tests' logs.")
+parser.add_argument('--logbase', default='testlog',
+                    help="Base name for log file.")
+parser.add_argument('--num-processes', default=None,
+                    help='How many parallel processes to use.')
parser.add_argument('args', nargs='+')
@@ -198,9 +202,30 @@ def filter_tests(suite, tests):
        return tests
    return [x for x in tests if suite in x.suite]
-def run_tests(datafilename):
+def determine_worker_count():
+    varname = 'MESON_TESTTHREADS'
+    if varname in os.environ:
+        try:
+            num_workers = int(os.environ[varname])
+        except ValueError:
+            print('Invalid value in %s, using 1 thread.' % varname)
+            num_workers = 1
+    else:
+        try:
+            # Fails in some weird environments such as Debian
+            # reproducible build.
+            num_workers = multiprocessing.cpu_count()
+        except Exception:
+            num_workers = 1
+    return num_workers
+
+def run_tests(datafilename, log_base, num_workers=None):
    global options
-    logfile_base = 'meson-logs/testlog'
+    if num_workers is None:
+        num_workers = determine_worker_count()
+    else:
+        num_workers = int(num_workers)
+    logfile_base = os.path.join('meson-logs', log_base)
    if options.wrapper is None:
        wrap = []
        logfilename = logfile_base + '.txt'
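
The new determine_worker_count() gives MESON_TESTTHREADS priority over autodetection and degrades to a single worker when either source is unusable. A hedged sketch of the three paths, assuming the module is importable as below:

    import os
    from mesonbuild.scripts import meson_test

    os.environ['MESON_TESTTHREADS'] = '4'
    print(meson_test.determine_worker_count())  # -> 4 (env var wins)

    os.environ['MESON_TESTTHREADS'] = 'not-a-number'
    print(meson_test.determine_worker_count())  # warns, falls back to 1

    del os.environ['MESON_TESTTHREADS']
    print(meson_test.determine_worker_count())  # -> cpu_count(), or 1 if it raises
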
@@ -215,15 +240,6 @@ def run_tests(datafilename):
        print('No tests defined.')
        return
    numlen = len('%d' % len(tests))
-    varname = 'MESON_TESTTHREADS'
-    if varname in os.environ:
-        try:
-            num_workers = int(os.environ[varname])
-        except ValueError:
-            print('Invalid value in %s, using 1 thread.' % varname)
-            num_workers = 1
-    else:
-        num_workers = multiprocessing.cpu_count()
    executor = conc.ThreadPoolExecutor(max_workers=num_workers)
    futures = []
    filtered_tests = filter_tests(options.suite, tests)
@@ -265,7 +281,7 @@ def run(args):
    if options.wd is not None:
        os.chdir(options.wd)
    datafile = options.args[0]
-    logfilename = run_tests(datafile)
+    logfilename = run_tests(datafile, options.logbase, options.num_processes)
    if len(collected_logs) > 0:
        if len(collected_logs) > 10:
            print('\nThe output from the first 10 failed tests:\n')
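
Taken together, an explicit --num-processes now takes precedence over MESON_TESTTHREADS, which in turn beats cpu_count() autodetection, because run_tests() only consults determine_worker_count() when no count was passed in. A hedged usage sketch; 'testdata.dat' stands in for the real pickled data file:

    import os
    from mesonbuild.scripts import meson_test

    os.environ['MESON_TESTTHREADS'] = '4'

    # Uses 4 workers: no explicit count, so the env var is consulted.
    meson_test.run(['testdata.dat'])

    # Uses 1 worker: the explicit flag bypasses the env var entirely.
    meson_test.run(['testdata.dat', '--num-processes=1'])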