author     Jussi Pakkanen <jpakkane@gmail.com>  2016-10-16 21:17:04 +0300
committer  Jussi Pakkanen <jpakkane@gmail.com>  2016-11-18 22:04:29 +0200
commit     def68cbc50efe65af1b156d2d76fc09a457500b5 (patch)
tree       227c90a8f1720dfe133a4e497859b115ca0061b2 /mesonbuild/scripts/meson_benchmark.py
parent     0d619df2f38590620d0576e6736b14b1f71ce8c8 (diff)
Use the same code for tests and benchmarks.
Diffstat (limited to 'mesonbuild/scripts/meson_benchmark.py')
-rwxr-xr-x  mesonbuild/scripts/meson_benchmark.py | 79
1 file changed, 2 insertions(+), 77 deletions(-)
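
The deleted code's core job was simple: run each benchmark five times, collect wall-clock durations, and report mean +- sample standard deviation, failing the benchmark if any iteration returned non-zero. As context for the diff below, here is a minimal standalone sketch of that measurement loop; the names benchmark() and ITERATIONS are illustrative, not Meson's.

import statistics
import subprocess
import time

ITERATIONS = 5  # the deleted run_benchmarks() hard-coded iteration_count = 5

def benchmark(cmd):
    # Run the command repeatedly, timing each run.
    durations = []
    failed = False
    for _ in range(ITERATIONS):
        start = time.monotonic()
        result = subprocess.run(cmd)
        durations.append(time.monotonic() - start)
        if result.returncode != 0:
            failed = True  # any failing iteration marks the whole benchmark FAIL
    # statistics.stdev is the sample standard deviation, as in the old code.
    return statistics.mean(durations), statistics.stdev(durations), failed

# 'true' stands in for a real benchmark executable.
mean, stdev, failed = benchmark(['true'])
print('%s %5.5f s +- %5.5f s' % ('FAIL' if failed else 'OK', mean, stdev))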
diff --git a/mesonbuild/scripts/meson_benchmark.py b/mesonbuild/scripts/meson_benchmark.py
index 9029c21..553fc89 100755
--- a/mesonbuild/scripts/meson_benchmark.py
+++ b/mesonbuild/scripts/meson_benchmark.py
@@ -14,86 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import subprocess, sys, os, argparse
-import pickle, statistics, json
+import sys
 from . import meson_test
 
-parser = argparse.ArgumentParser()
-parser.add_argument('--wd', default=None, dest='wd',
-                    help='directory to cd into before running')
-parser.add_argument('args', nargs='+')
-
-def print_stats(numlen, num_tests, name, res, i, duration, stdev):
-    startpad = ' '*(numlen - len('%d' % (i+1)))
-    num = '%s%d/%d' % (startpad, i+1, num_tests)
-    padding1 = ' '*(38-len(name))
-    padding2 = ' '*(8-len(res))
-    result_str = '%s %s %s%s%s%5.5f s +- %5.5f s' % \
-        (num, name, padding1, res, padding2, duration, stdev)
-    print(result_str)
-#    write_json_log(jsonlogfile, name, result)
-
-def print_json_log(jsonlogfile, rawruns, test_name, i):
-    jsonobj = {'name' : test_name}
-    runs = []
-    for r in rawruns:
-        runobj = {'duration': r.duration,
-                  'stdout': r.stdo,
-                  'returncode' : r.returncode,
-                  'duration' : r.duration}
-        if r.stde:
-            runobj['stderr'] = r.stde
-        runs.append(runobj)
-    jsonobj['runs'] = runs
-    jsonlogfile.write(json.dumps(jsonobj) + '\n')
-    jsonlogfile.flush()
-
-def run_benchmarks(options, datafile):
-    failed_tests = 0
-    logfile_base = 'meson-logs/benchmarklog'
-    jsonlogfilename = logfile_base + '.json'
-    with open(datafile, 'rb') as f:
-        tests = pickle.load(f)
-    num_tests = len(tests)
-    if num_tests == 0:
-        print('No benchmarks defined.')
-        return 0
-    iteration_count = 5
-    wrap = [] # Benchmarks on cross builds are pointless so don't support them.
-    with open(jsonlogfilename, 'w') as jsonlogfile:
-        for i, test in enumerate(tests):
-            runs = []
-            durations = []
-            failed = False
-            for _ in range(iteration_count):
-                res = meson_test.run_single_test(wrap, test)
-                runs.append(res)
-                durations.append(res.duration)
-                if res.returncode != 0:
-                    failed = True
-            mean = statistics.mean(durations)
-            stddev = statistics.stdev(durations)
-            if failed:
-                resultstr = 'FAIL'
-                failed_tests += 1
-            else:
-                resultstr = 'OK'
-            print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
-            print_json_log(jsonlogfile, runs, test.name, i)
-    print('\nFull log written to meson-logs/benchmarklog.json.')
-    return failed_tests
-
 def run(args):
-    global failed_tests
-    options = parser.parse_args(args)
-    if len(options.args) != 1:
-        print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
-        print('%s [data file]' % sys.argv[0])
-    if options.wd is not None:
-        os.chdir(options.wd)
-    datafile = options.args[0]
-    returncode = run_benchmarks(options, datafile)
-    return returncode
+    return meson_test.run(args + ['--logbase', 'benchmarklog', '--num-processes=1'])
 
 if __name__ == '__main__':
     sys.exit(run(sys.argv[1:]))
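
After this change, run() is just a thin wrapper: it appends two fixed options and hands everything else to the shared test runner. Here is a minimal, self-contained sketch of that delegation pattern, with shared_run() standing in for meson_test.run(); the two option strings are the ones the commit actually adds.

import sys

def shared_run(args):
    # Stand-in for meson_test.run(): parse the options, execute the
    # tests or benchmarks, and return the number of failures.
    print('would run with:', args)
    return 0

def run(args):
    # Append benchmark-specific options before delegating.
    return shared_run(args + ['--logbase', 'benchmarklog', '--num-processes=1'])

if __name__ == '__main__':
    sys.exit(run(sys.argv[1:]))

Passing --num-processes=1 presumably keeps benchmark runs serial so that concurrent load does not distort the timings, while --logbase benchmarklog keeps the results in meson-logs/benchmarklog.json, separate from the ordinary test logs.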