author     Jussi Pakkanen <jpakkane@gmail.com>   2015-12-03 00:28:52 +0200
committer  Jussi Pakkanen <jpakkane@gmail.com>   2015-12-03 00:28:52 +0200
commit     f7608fc56941e81e18647de00591ba23050c5f7b (patch)
tree       392bd3d5f0935ddde13b0610676f97e07fbe20a9
parent     913963d608a419a336473203c3167987f8c15060 (diff)
parent     b167f3a56f2f21f5dab284463e3be15a201738b2 (diff)
Merge pull request #317 from mesonbuild/benchmark
Create benchmark feature
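In short, this merge adds a benchmark() function to the Meson language (mirroring test()), a serialised meson_benchmark_setup.dat file, a `benchmark` ninja target that runs each registered benchmark five times and reports the mean duration plus its standard deviation, a --benchmarks flag for mesonintrospect.py, and a JSON log written to meson-logs/benchmarklog.json. A minimal usage sketch, taken from the test case added in this patch (the ninja invocation assumes an already configured build directory):

    # meson.build
    project('benchmark', 'c',
      default_options : ['c_std=gnu99'])

    delayer = executable('delayer', 'delayer.c')
    # Register a benchmark: same name + executable shape as test().
    benchmark('delayer', delayer)

    # From the build directory:
    $ ninja benchmark

Benchmarks reuse the existing test serialisation machinery (write_test_serialisation), so benchmark() takes the same positional arguments as test(); as noted in the runner itself, cross builds are not supported, so no exec wrapper is applied.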
-rw-r--r--   backends.py                                  12
-rw-r--r--   build.py                                      4
-rw-r--r--   coredata.py                                   1
-rw-r--r--   interpreter.py                               15
-rwxr-xr-x   meson_benchmark.py                           97
-rwxr-xr-x   mesonintrospect.py                           12
-rw-r--r--   ninjabackend.py                              11
-rwxr-xr-x   run_tests.py                                  9
-rw-r--r--   test cases/common/99 benchmark/delayer.c     20
-rw-r--r--   test cases/common/99 benchmark/meson.build    5
-rw-r--r--   test cases/frameworks/1 boost/nomod.cpp       2
11 files changed, 179 insertions, 9 deletions
diff --git a/backends.py b/backends.py
index 5710fac..03ecd4b 100644
--- a/backends.py
+++ b/backends.py
@@ -127,6 +127,10 @@ class Backend():
datafile = open(test_data, 'wb')
self.write_test_file(datafile)
datafile.close()
+ benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+ datafile = open(benchmark_data, 'wb')
+ self.write_benchmark_file(datafile)
+ datafile.close()
def has_vala(self, target):
for s in target.get_sources():
@@ -269,9 +273,15 @@ class Backend():
result.append(dirseg)
return result
+ def write_benchmark_file(self, datafile):
+ self.write_test_serialisation(self.build.get_benchmarks(), datafile)
+
def write_test_file(self, datafile):
+ self.write_test_serialisation(self.build.get_tests(), datafile)
+
+ def write_test_serialisation(self, tests, datafile):
arr = []
- for t in self.build.get_tests():
+ for t in tests:
exe = t.get_exe()
if isinstance(exe, dependencies.ExternalProgram):
fname = exe.fullpath
diff --git a/build.py b/build.py
index 06877a1..54b5b69 100644
--- a/build.py
+++ b/build.py
@@ -65,6 +65,7 @@ class Build:
self.cross_compilers = []
self.global_args = {}
self.tests = []
+ self.benchmarks = []
self.headers = []
self.man = []
self.data = []
@@ -108,6 +109,9 @@ class Build:
def get_tests(self):
return self.tests
+ def get_benchmarks(self):
+ return self.benchmarks
+
def get_headers(self):
return self.headers
diff --git a/coredata.py b/coredata.py
index dbb0c71..e216428 100644
--- a/coredata.py
+++ b/coredata.py
@@ -215,6 +215,7 @@ forbidden_target_names = {'clean': None,
'all': None,
'test': None,
'test-valgrind': None,
+ 'benchmark': None,
'install': None,
'build.ninja': None,
}
diff --git a/interpreter.py b/interpreter.py
index d60de83..cacfe05 100644
--- a/interpreter.py
+++ b/interpreter.py
@@ -943,6 +943,7 @@ class Interpreter():
'run_target' : self.func_run_target,
'generator' : self.func_generator,
'test' : self.func_test,
+ 'benchmark' : self.func_benchmark,
'install_headers' : self.func_install_headers,
'install_man' : self.func_install_man,
'subdir' : self.func_subdir,
@@ -1677,7 +1678,13 @@ class Interpreter():
self.generators.append(gen)
return gen
+ def func_benchmark(self, node, args, kwargs):
+ self.add_test(node, args, kwargs, False)
+
def func_test(self, node, args, kwargs):
+ self.add_test(node, args, kwargs, True)
+
+ def add_test(self, node, args, kwargs, is_base_test):
if len(args) != 2:
raise InterpreterException('Incorrect number of arguments')
if not isinstance(args[0], str):
@@ -1719,8 +1726,12 @@ class Interpreter():
if not isinstance(timeout, int):
raise InterpreterException('Timeout must be an integer.')
t = Test(args[0], args[1].held_object, par, cmd_args, env, should_fail, valgrind_args, timeout)
- self.build.tests.append(t)
- mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+ if is_base_test:
+ self.build.tests.append(t)
+ mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+ else:
+ self.build.benchmarks.append(t)
+ mlog.debug('Adding benchmark "', mlog.bold(args[0]), '".', sep='')
@stringArgs
def func_install_headers(self, node, args, kwargs):
diff --git a/meson_benchmark.py b/meson_benchmark.py
new file mode 100755
index 0000000..4888d9f
--- /dev/null
+++ b/meson_benchmark.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+# Copyright 2015 The Meson development team
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess, sys, os, argparse
+import pickle, statistics, json
+import meson_test
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--wd', default=None, dest='wd',
+ help='directory to cd into before running')
+parser.add_argument('args', nargs='+')
+
+def print_stats(numlen, num_tests, name, res, i, duration, stdev):
+ startpad = ' '*(numlen - len('%d' % (i+1)))
+ num = '%s%d/%d' % (startpad, i+1, num_tests)
+ padding1 = ' '*(38-len(name))
+ padding2 = ' '*(8-len(res))
+ result_str = '%s %s %s%s%s%5.5f s +- %5.5f s' % \
+ (num, name, padding1, res, padding2, duration, stdev)
+ print(result_str)
+# write_json_log(jsonlogfile, name, result)
+
+def print_json_log(jsonlogfile, rawruns, test_name, i):
+ jsonobj = {'name' : test_name}
+ runs = []
+ for r in rawruns:
+ runobj = {'duration': r.duration,
+ 'stdout': r.stdo,
+ 'stderr': r.stde,
+ 'returncode' : r.returncode}
+ runs.append(runobj)
+ jsonobj['runs'] = runs
+ jsonlogfile.write(json.dumps(jsonobj) + '\n')
+ jsonlogfile.flush()
+
+def run_benchmarks(options, datafile):
+ failed_tests = 0
+ logfile_base = 'meson-logs/benchmarklog'
+ jsonlogfilename = logfile_base+ '.json'
+ jsonlogfile = open(jsonlogfilename, 'w')
+ tests = pickle.load(open(datafile, 'rb'))
+ num_tests = len(tests)
+ if num_tests == 0:
+ print('No benchmarks defined.')
+ return 0
+ iteration_count = 5
+ wrap = [] # Benchmarks on cross builds are pointless so don't support them.
+ for i, test in enumerate(tests):
+ runs = []
+ durations = []
+ failed = False
+ for _ in range(iteration_count):
+ res = meson_test.run_single_test(wrap, test)
+ runs.append(res)
+ durations.append(res.duration)
+ if res.returncode != 0:
+ failed = True
+ mean = statistics.mean(durations)
+ stddev = statistics.stdev(durations)
+ if failed:
+ resultstr = 'FAIL'
+ failed_tests += 1
+ else:
+ resultstr = 'OK'
+ print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
+ print_json_log(jsonlogfile, runs, test.name, i)
+ print('\nFull log written to meson-logs/benchmarklog.json.')
+ return failed_tests
+
+def run(args):
+ options = parser.parse_args(args)
+ if len(options.args) != 1:
+ print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
+ print('%s [data file]' % sys.argv[0])
+ return 1
+ if options.wd is not None:
+ os.chdir(options.wd)
+ datafile = options.args[0]
+ returncode = run_benchmarks(options, datafile)
+ return returncode
+
+if __name__ == '__main__':
+ sys.exit(run(sys.argv[1:]))
diff --git a/mesonintrospect.py b/mesonintrospect.py
index 3ef2ab5..f6d1f14 100755
--- a/mesonintrospect.py
+++ b/mesonintrospect.py
@@ -37,6 +37,8 @@ parser.add_argument('--buildoptions', action='store_true', dest='buildoptions',
help='List all build options.')
parser.add_argument('--tests', action='store_true', dest='tests', default=False,
help='List all unit tests.')
+parser.add_argument('--benchmarks', action='store_true', dest='benchmarks', default=False,
+ help='List all benchmarks.')
parser.add_argument('--dependencies', action='store_true', dest='dependencies', default=False,
help='list external dependencies.')
parser.add_argument('args', nargs='+')
@@ -157,7 +159,11 @@ def list_tests(testdata):
result = []
for t in testdata:
to = {}
- to['cmd'] = [t.fname] + t.cmd_args
+ if isinstance(t.fname, str):
+ fname = [t.fname]
+ else:
+ fname = t.fname
+ to['cmd'] = fname + t.cmd_args
to['env'] = t.env
to['name'] = t.name
result.append(to)
@@ -175,9 +181,11 @@ if __name__ == '__main__':
corefile = os.path.join(bdir, 'meson-private/coredata.dat')
buildfile = os.path.join(bdir, 'meson-private/build.dat')
testfile = os.path.join(bdir, 'meson-private/meson_test_setup.dat')
+ benchmarkfile = os.path.join(bdir, 'meson-private/meson_benchmark_setup.dat')
coredata = pickle.load(open(corefile, 'rb'))
builddata = pickle.load(open(buildfile, 'rb'))
testdata = pickle.load(open(testfile, 'rb'))
+ benchmarkdata = pickle.load(open(benchmarkfile, 'rb'))
if options.list_targets:
list_targets(coredata, builddata)
elif options.target_files is not None:
@@ -188,6 +196,8 @@ if __name__ == '__main__':
list_buildoptions(coredata, builddata)
elif options.tests:
list_tests(testdata)
+ elif options.benchmarks:
+ list_tests(benchmarkdata)
elif options.dependencies:
list_deps(coredata)
else:
diff --git a/ninjabackend.py b/ninjabackend.py
index 11efc2a..7d72e06 100644
--- a/ninjabackend.py
+++ b/ninjabackend.py
@@ -550,6 +550,17 @@ class NinjaBackend(backends.Backend):
velem.write(outfile)
self.check_outputs(velem)
+ # And then benchmarks.
+ benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
+ benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+ cmd = [sys.executable, benchmark_script, benchmark_data]
+ elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
+ elem.add_item('COMMAND', cmd)
+ elem.add_item('DESC', 'Running benchmark suite.')
+ elem.add_item('pool', 'console')
+ elem.write(outfile)
+ self.check_outputs(elem)
+
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
diff --git a/run_tests.py b/run_tests.py
index f6a6a81..e84d610 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -21,7 +21,7 @@ import sys
import environment
import mesonlib
import mlog
-import meson, meson_test
+import meson, meson_test, meson_benchmark
import argparse
import xml.etree.ElementTree as ET
import time
@@ -87,7 +87,7 @@ def setup_commands(backend):
compile_commands = [ninja_command, '-v']
else:
compile_commands = [ninja_command]
- test_commands = [ninja_command, 'test']
+ test_commands = [ninja_command, 'test', 'benchmark']
install_commands = [ninja_command, 'install']
def platform_fix_filename(fname):
@@ -165,11 +165,12 @@ def run_test_inprocess(testdir):
sys.stderr = mystderr = StringIO()
old_cwd = os.getcwd()
os.chdir(testdir)
- returncode = meson_test.run(['meson-private/meson_test_setup.dat'])
+ returncode_test = meson_test.run(['meson-private/meson_test_setup.dat'])
+ returncode_benchmark = meson_benchmark.run(['meson-private/meson_benchmark_setup.dat'])
sys.stdout = old_stdout
sys.stderr = old_stderr
os.chdir(old_cwd)
- return (returncode, mystdout.getvalue(), mystderr.getvalue())
+ return (max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue())
def run_test(testdir, extra_args, should_succeed):
diff --git a/test cases/common/99 benchmark/delayer.c b/test cases/common/99 benchmark/delayer.c
new file mode 100644
index 0000000..cfcedad
--- /dev/null
+++ b/test cases/common/99 benchmark/delayer.c
@@ -0,0 +1,20 @@
+/* Simple prog that sleeps for a random time. */
+
+#include<stdlib.h>
+#include<time.h>
+#if defined(_WIN32)
+#include<windows.h>
+#endif
+
+int main(int argc, char **argv) {
+ srand(time(NULL));
+#if !defined(_WIN32)
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = 199999999.0*rand()/RAND_MAX;
+ nanosleep(&t, NULL);
+#else
+ Sleep(50.0*rand()/RAND_MAX);
+#endif
+ return 0;
+}
diff --git a/test cases/common/99 benchmark/meson.build b/test cases/common/99 benchmark/meson.build
new file mode 100644
index 0000000..bd4340b
--- /dev/null
+++ b/test cases/common/99 benchmark/meson.build
@@ -0,0 +1,5 @@
+project('benchmark', 'c',
+ default_options : ['c_std=gnu99'])
+
+delayer = executable('delayer', 'delayer.c')
+benchmark('delayer', delayer)
diff --git a/test cases/frameworks/1 boost/nomod.cpp b/test cases/frameworks/1 boost/nomod.cpp
index 7b16881..55c95b2 100644
--- a/test cases/frameworks/1 boost/nomod.cpp
+++ b/test cases/frameworks/1 boost/nomod.cpp
@@ -9,7 +9,7 @@ boost::any get_any() {
int main(int argc, char **argv) {
boost::any result = get_any();
if(boost::any_cast<int>(result) == 3) {
- std::cout << "Everything is fine in the worls.\n";
+ std::cout << "Everything is fine in the world.\n";
return 0;
} else {
std::cout << "Mathematics stopped working.\n";