-rw-r--r--   backends.py                                  12
-rw-r--r--   build.py                                      4
-rw-r--r--   coredata.py                                   1
-rw-r--r--   interpreter.py                               15
-rwxr-xr-x   meson_benchmark.py                            5
-rw-r--r--   ninjabackend.py                              11
-rwxr-xr-x   run_tests.py                                  9
-rw-r--r--   test cases/common/98 benchmark/delayer.c     21
-rw-r--r--   test cases/common/98 benchmark/meson.build    5
9 files changed, 75 insertions, 8 deletions
diff --git a/backends.py b/backends.py
index 5710fac..03ecd4b 100644
--- a/backends.py
+++ b/backends.py
@@ -127,6 +127,10 @@ class Backend():
         datafile = open(test_data, 'wb')
         self.write_test_file(datafile)
         datafile.close()
+        benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+        datafile = open(benchmark_data, 'wb')
+        self.write_benchmark_file(datafile)
+        datafile.close()
 
     def has_vala(self, target):
         for s in target.get_sources():
@@ -269,9 +273,15 @@ class Backend():
             result.append(dirseg)
         return result
 
+    def write_benchmark_file(self, datafile):
+        self.write_test_serialisation(self.build.get_benchmarks(), datafile)
+
     def write_test_file(self, datafile):
+        self.write_test_serialisation(self.build.get_tests(), datafile)
+
+    def write_test_serialisation(self, tests, datafile):
         arr = []
-        for t in self.build.get_tests():
+        for t in tests:
             exe = t.get_exe()
             if isinstance(exe, dependencies.ExternalProgram):
                 fname = exe.fullpath
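
The file written by write_benchmark_file above is the pickled counterpart of the existing test data file; meson_benchmark.py (further down in this diff) loads it back with pickle.load. A minimal sketch of that round trip, using a placeholder payload in place of the real serialised test objects:

    import os
    import pickle

    os.makedirs('meson-private', exist_ok=True)  # scratch dir name used elsewhere in this diff
    benchmark_data = os.path.join('meson-private', 'meson_benchmark_setup.dat')

    # Serialise, as write_benchmark_file -> write_test_serialisation does:
    with open(benchmark_data, 'wb') as datafile:
        pickle.dump([{'name': 'delayer'}], datafile)  # placeholder for the real Test objects

    # Deserialise, as run_benchmarks in meson_benchmark.py does:
    with open(benchmark_data, 'rb') as datafile:
        benchmarks = pickle.load(datafile)
    print(len(benchmarks), 'benchmark(s) loaded')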
diff --git a/build.py b/build.py
index 47221c1..5c200c1 100644
--- a/build.py
+++ b/build.py
@@ -65,6 +65,7 @@ class Build:
         self.cross_compilers = []
         self.global_args = {}
         self.tests = []
+        self.benchmarks = []
         self.headers = []
         self.man = []
         self.data = []
@@ -108,6 +109,9 @@ class Build:
     def get_tests(self):
         return self.tests
 
+    def get_benchmarks(self):
+        return self.benchmarks
+
     def get_headers(self):
         return self.headers
diff --git a/coredata.py b/coredata.py
index dbb0c71..e216428 100644
--- a/coredata.py
+++ b/coredata.py
@@ -215,6 +215,7 @@ forbidden_target_names = {'clean': None,
                           'all': None,
                           'test': None,
                           'test-valgrind': None,
+                          'benchmark': None,
                           'install': None,
                           'build.ninja': None,
                           }
diff --git a/interpreter.py b/interpreter.py
index 3da71a1..6fe0212 100644
--- a/interpreter.py
+++ b/interpreter.py
@@ -944,6 +944,7 @@ class Interpreter():
                       'run_target' : self.func_run_target,
                       'generator' : self.func_generator,
                       'test' : self.func_test,
+                      'benchmark' : self.func_benchmark,
                       'install_headers' : self.func_install_headers,
                       'install_man' : self.func_install_man,
                       'subdir' : self.func_subdir,
@@ -1676,7 +1677,13 @@ class Interpreter():
         self.generators.append(gen)
         return gen
 
+    def func_benchmark(self, node, args, kwargs):
+        self.add_test(node, args, kwargs, False)
+
     def func_test(self, node, args, kwargs):
+        self.add_test(node, args, kwargs, True)
+
+    def add_test(self, node, args, kwargs, is_base_test):
         if len(args) != 2:
             raise InterpreterException('Incorrect number of arguments')
         if not isinstance(args[0], str):
@@ -1718,8 +1725,12 @@ class Interpreter():
         if not isinstance(timeout, int):
             raise InterpreterException('Timeout must be an integer.')
         t = Test(args[0], args[1].held_object, par, cmd_args, env, should_fail, valgrind_args, timeout)
-        self.build.tests.append(t)
-        mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+        if is_base_test:
+            self.build.tests.append(t)
+            mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+        else:
+            self.build.benchmarks.append(t)
+            mlog.debug('Adding benchmark "', mlog.bold(args[0]), '".', sep='')
 
     @stringArgs
     def func_install_headers(self, node, args, kwargs):
diff --git a/meson_benchmark.py b/meson_benchmark.py
index 3cde98b..4888d9f 100755
--- a/meson_benchmark.py
+++ b/meson_benchmark.py
@@ -54,6 +54,9 @@ def run_benchmarks(options, datafile):
     jsonlogfile = open(jsonlogfilename, 'w')
     tests = pickle.load(open(datafile, 'rb'))
     num_tests = len(tests)
+    if num_tests == 0:
+        print('No benchmarks defined.')
+        return 0
     iteration_count = 5
     wrap = [] # Benchmarks on cross builds are pointless so don't support them.
     for i, test in enumerate(tests):
@@ -75,6 +78,7 @@ def run_benchmarks(options, datafile):
             resultstr = 'OK'
         print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
         print_json_log(jsonlogfile, runs, test.name, i)
+    print('\nFull log written to meson-logs/benchmarklog.json.')
     return failed_tests
 
 def run(args):
@@ -87,7 +91,6 @@ def run(args):
     os.chdir(options.wd)
     datafile = options.args[0]
     returncode = run_benchmarks(options, datafile)
-    print('\nFull log written to meson-logs/benchmarklog.json.')
     return returncode
 
 if __name__ == '__main__':
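
print_stats and print_json_log are pre-existing helpers that this diff does not touch. The measurement itself is a fixed five-run loop (iteration_count = 5) whose mean and standard deviation feed those helpers. A self-contained sketch of that style of measurement, not the exact code, with an illustrative command:

    import statistics
    import subprocess
    import time

    def benchmark_command(cmd, iteration_count=5):
        # Time iteration_count runs of cmd, as run_benchmarks does for each test.
        durations = []
        for _ in range(iteration_count):
            start = time.time()
            subprocess.call(cmd)
            durations.append(time.time() - start)
        return statistics.mean(durations), statistics.stdev(durations)

    mean, stddev = benchmark_command(['./delayer'])  # illustrative binary
    print('mean %.4f s, stddev %.4f s' % (mean, stddev))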
diff --git a/ninjabackend.py b/ninjabackend.py
index 0f3c280..559c006 100644
--- a/ninjabackend.py
+++ b/ninjabackend.py
@@ -550,6 +550,17 @@ class NinjaBackend(backends.Backend):
         velem.write(outfile)
         self.check_outputs(velem)
 
+        # And then benchmarks.
+        benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
+        benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+        cmd = [sys.executable, benchmark_script, benchmark_data]
+        elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
+        elem.add_item('COMMAND', cmd)
+        elem.add_item('DESC', 'Running benchmark suite.')
+        elem.add_item('pool', 'console')
+        elem.write(outfile)
+        self.check_outputs(elem)
+
     def generate_rules(self, outfile):
         outfile.write('# Rules for compiling.\n\n')
         self.generate_compile_rules(outfile)
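
When Ninja runs the new phony benchmark target, the COMMAND assembled above boils down to invoking meson_benchmark.py on the serialised data file. Roughly, from the build directory (the script path is illustrative; Meson resolves it from script_root):

    import subprocess
    import sys

    # What 'ninja benchmark' effectively executes:
    subprocess.check_call([sys.executable,
                           'meson_benchmark.py',                        # benchmark_script
                           'meson-private/meson_benchmark_setup.dat'])  # benchmark_data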
diff --git a/run_tests.py b/run_tests.py
index f6a6a81..e84d610 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -21,7 +21,7 @@ import sys
 import environment
 import mesonlib
 import mlog
-import meson, meson_test
+import meson, meson_test, meson_benchmark
 import argparse
 import xml.etree.ElementTree as ET
 import time
@@ -87,7 +87,7 @@ def setup_commands(backend):
         compile_commands = [ninja_command, '-v']
     else:
         compile_commands = [ninja_command]
-    test_commands = [ninja_command, 'test']
+    test_commands = [ninja_command, 'test', 'benchmark']
     install_commands = [ninja_command, 'install']
 
 def platform_fix_filename(fname):
@@ -165,11 +165,12 @@ def run_test_inprocess(testdir):
     sys.stderr = mystderr = StringIO()
     old_cwd = os.getcwd()
     os.chdir(testdir)
-    returncode = meson_test.run(['meson-private/meson_test_setup.dat'])
+    returncode_test = meson_test.run(['meson-private/meson_test_setup.dat'])
+    returncode_benchmark = meson_benchmark.run(['meson-private/meson_benchmark_setup.dat'])
     sys.stdout = old_stdout
     sys.stderr = old_stderr
     os.chdir(old_cwd)
-    return (returncode, mystdout.getvalue(), mystderr.getvalue())
+    return (max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue())
 
 def run_test(testdir, extra_args, should_succeed):
diff --git a/test cases/common/98 benchmark/delayer.c b/test cases/common/98 benchmark/delayer.c
new file mode 100644
index 0000000..d99906c
--- /dev/null
+++ b/test cases/common/98 benchmark/delayer.c
@@ -0,0 +1,21 @@
+/* Simple program that sleeps for a random time. */
+
+#include<stdlib.h>
+#include<time.h> /* time(), and timespec/nanosleep on POSIX */
+#if defined(_MSC_VER)
+/* Sleep() is declared in windows.h; time.h is still needed for time(). */
+#include<windows.h>
+#endif
+
+int main(int argc, char **argv) {
+    srand(time(NULL));
+#if !defined(_MSC_VER)
+    struct timespec t;
+    t.tv_sec = 0;
+    t.tv_nsec = 199999999.0*rand()/RAND_MAX; /* 0 to just under 0.2 s */
+    nanosleep(&t, NULL);
+#else
+    Sleep(500.0*rand()/RAND_MAX); /* 0 to 500 ms */
+#endif
+    return 0;
+}
diff --git a/test cases/common/98 benchmark/meson.build b/test cases/common/98 benchmark/meson.build
new file mode 100644
index 0000000..bd4340b
--- /dev/null
+++ b/test cases/common/98 benchmark/meson.build
@@ -0,0 +1,5 @@
+project('benchmark', 'c',
+  default_options : ['c_std=gnu99'])
+
+delayer = executable('delayer', 'delayer.c')
+benchmark('delayer', delayer)
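
With this test case in place, running the new target (ninja benchmark, which run_tests.py now appends to the ninja test command) should execute delayer five times, print its mean runtime and standard deviation via print_stats, and write the full log to meson-logs/benchmarklog.json as reported by meson_benchmark.py.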