author    | Elliott Sales de Andrade <quantum.analyst@gmail.com> | 2016-08-24 19:29:11 -0400
committer | Elliott Sales de Andrade <quantum.analyst@gmail.com> | 2016-08-27 18:29:55 -0400
commit    | 4c71695e41a50dda3199d26ed7aedbaaf3150768 (patch)
tree      | 8bd499a2a113c3da5c1dee8ed29f4b3c69d27dd7 /mesonbuild/scripts/meson_benchmark.py
parent    | 7830cb61c39fbaf57933ac403dcdf5007667d87d (diff)
Use context manager for file I/O.
There are a few cases where a context manager cannot be used, such as
the logger.
Diffstat (limited to 'mesonbuild/scripts/meson_benchmark.py')
-rw-r--r-- | mesonbuild/scripts/meson_benchmark.py | 43
1 file changed, 22 insertions, 21 deletions
diff --git a/mesonbuild/scripts/meson_benchmark.py b/mesonbuild/scripts/meson_benchmark.py
index d1107b6..6d138b0 100644
--- a/mesonbuild/scripts/meson_benchmark.py
+++ b/mesonbuild/scripts/meson_benchmark.py
@@ -52,33 +52,34 @@ def run_benchmarks(options, datafile):
     failed_tests = 0
     logfile_base = 'meson-logs/benchmarklog'
     jsonlogfilename = logfile_base+ '.json'
-    jsonlogfile = open(jsonlogfilename, 'w')
-    tests = pickle.load(open(datafile, 'rb'))
+    with open(datafile, 'rb') as f:
+        tests = pickle.load(f)
     num_tests = len(tests)
     if num_tests == 0:
         print('No benchmarks defined.')
         return 0
     iteration_count = 5
     wrap = [] # Benchmarks on cross builds are pointless so don't support them.
-    for i, test in enumerate(tests):
-        runs = []
-        durations = []
-        failed = False
-        for _ in range(iteration_count):
-            res = meson_test.run_single_test(wrap, test)
-            runs.append(res)
-            durations.append(res.duration)
-            if res.returncode != 0:
-                failed = True
-        mean = statistics.mean(durations)
-        stddev = statistics.stdev(durations)
-        if failed:
-            resultstr = 'FAIL'
-            failed_tests += 1
-        else:
-            resultstr = 'OK'
-        print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
-        print_json_log(jsonlogfile, runs, test.name, i)
+    with open(jsonlogfilename, 'w') as jsonlogfile:
+        for i, test in enumerate(tests):
+            runs = []
+            durations = []
+            failed = False
+            for _ in range(iteration_count):
+                res = meson_test.run_single_test(wrap, test)
+                runs.append(res)
+                durations.append(res.duration)
+                if res.returncode != 0:
+                    failed = True
+            mean = statistics.mean(durations)
+            stddev = statistics.stdev(durations)
+            if failed:
+                resultstr = 'FAIL'
+                failed_tests += 1
+            else:
+                resultstr = 'OK'
+            print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
+            print_json_log(jsonlogfile, runs, test.name, i)
     print('\nFull log written to meson-logs/benchmarklog.json.')
     return failed_tests
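
For readers unfamiliar with the idiom the change adopts, here is a minimal standalone sketch (not part of the commit). A with-block guarantees the file is closed even if the body raises, whereas a bare open() can leak the handle on error. The load_tests and open_json_log helpers below are hypothetical names used only for illustration; the second one mirrors the commit message's point that a long-lived handle (such as a logger's file) cannot simply be wrapped in a with-block, so its caller must close it explicitly.

    import json
    import pickle

    def load_tests(datafile):
        # Preferred: the context manager closes the file even if
        # pickle.load() raises an exception.
        with open(datafile, 'rb') as f:
            return pickle.load(f)

    def open_json_log(logfile_base='meson-logs/benchmarklog'):
        # Hypothetical long-lived handle (cf. "the logger" in the commit
        # message): the file must outlive this function, so it cannot be
        # wrapped in a with-block here; the caller closes it later.
        return open(logfile_base + '.json', 'w')

    if __name__ == '__main__':
        log = open_json_log('/tmp/benchmarklog')
        try:
            json.dump({'example': True}, log)
        finally:
            log.close()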