author | Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com> | 2018-07-13 10:45:40 -0500 |
---|---|---|
committer | Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com> | 2018-07-19 14:53:37 -0500 |
commit | 1cf4ae7fe644f5ad37ca82cb432147daf5c8ad77 | |
tree | 550b9b30af93be6a87402649a961bb44d80bd453 /benchtests/scripts | |
parent | e84bd8514cd4bf37b37d3f68feafc1e20afa4b56 | |
benchtests: improve argument parsing through argparse library
The argparse library is used in the compare_bench script to improve command-line
argument parsing. The 'schema validation file' is now an optional argument,
reducing the number of required parameters by one.
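For context, the pattern the commit adopts looks like the minimal sketch below. It is illustrative only, not glibc code, though the argument names mirror the commit: positional arguments stay required, while --schema becomes an optional flag that falls back to a default path next to the script.

```python
import argparse
import os

# Minimal sketch of the pattern the commit adopts: two required
# positional arguments plus an optional --schema flag with a default.
parser = argparse.ArgumentParser(description='Compare two benchmark files.')
parser.add_argument('bench1', help='first benchmark output file')
parser.add_argument('bench2', help='second benchmark output file')
parser.add_argument('--schema',
                    default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                         'benchout.schema.json'),
                    help='validation schema (default: %(default)s)')

args = parser.parse_args()
print(args.bench1, args.bench2, args.schema)
```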
* benchtests/scripts/compare_bench.py (__main__): Use the argparse
library to improve command-line parsing.
(__main__): Make the schema file an optional parameter (--schema),
defaulting to benchtests/scripts/benchout.schema.json.
(main): Move the parsing code out to __main__ and leave main only
as the caller of the main comparison functions.
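A side benefit of the (main) change described above: with argv handling confined to __main__, main() takes plain values rather than a raw argument list, so other Python code can drive the comparison directly. A hypothetical caller, with made-up file names and assuming the inputs exist:

```python
# Hypothetical driver -- not part of the glibc tree. Assumes the two
# benchmark output files and the schema exist at these made-up paths.
import compare_bench

# main() now accepts plain values instead of sys.argv slices.
compare_bench.main('bench-old.out', 'bench-new.out',
                   'benchout.schema.json', threshold=10.0)
```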
Diffstat (limited to 'benchtests/scripts')
-rwxr-xr-x | benchtests/scripts/compare_bench.py | 40 |
1 file changed, 19 insertions, 21 deletions
```diff
diff --git a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
index ea25f77..88e8911 100755
--- a/benchtests/scripts/compare_bench.py
+++ b/benchtests/scripts/compare_bench.py
@@ -25,6 +25,7 @@ import sys
 import os
 import pylab
 import import_bench as bench
+import argparse
 
 def do_compare(func, var, tl1, tl2, par, threshold):
     """Compare one of the aggregate measurements
@@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
         print('Writing out %s' % filename)
         pylab.savefig(filename)
 
-
-def main(args):
-    """Program Entry Point
-
-    Take two benchmark output files and compare their timings.
-    """
-    if len(args) > 4 or len(args) < 3:
-        print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
-
-    bench1 = bench.parse_bench(args[1], args[0])
-    bench2 = bench.parse_bench(args[2], args[0])
-    if len(args) == 4:
-        threshold = float(args[3])
-    else:
-        threshold = 10.0
-
-    if (bench1['timing_type'] != bench2['timing_type']):
-        print('Cannot compare benchmark outputs: timing types are different')
-        return
+def main(bench1, bench2, schema, threshold):
+    bench1 = bench.parse_bench(bench1, schema)
+    bench2 = bench.parse_bench(bench2, schema)
 
     plot_graphs(bench1, bench2)
 
@@ -181,4 +165,18 @@ def main(args):
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser(description='Take two benchmark and compare their timings.')
+
+    # Required parameters
+    parser.add_argument('bench1', help='First bench to compare')
+    parser.add_argument('bench2', help='Second bench to compare')
+
+    # Optional parameters
+    parser.add_argument('--schema',
+                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'benchout.schema.json'),
+                        help='JSON file to validate source/dest files (default: %(default)s)')
+    parser.add_argument('--threshold', default=10.0, help='Only print those with equal or higher threshold (default: %(default)s)')
+
+    args = parser.parse_args()
+
+    main(args.bench1, args.bench2, args.schema, args.threshold)
```
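After this change the script is invoked as compare_bench.py bench1 bench2 [--schema FILE] [--threshold N] instead of requiring the schema as the first positional argument. One caveat in the new parsing: default=10.0 only applies when the flag is omitted; a user-supplied --threshold arrives as a string, because no type= converter is given (the old code called float(args[3]) explicitly). A hedged sketch of the fix, should that matter downstream:

```python
import argparse

parser = argparse.ArgumentParser()
# argparse applies the type converter to command-line values only;
# without type=float, '--threshold 25' would yield the string '25'
# while the omitted-flag default stays a float.
parser.add_argument('--threshold', type=float, default=10.0)

print(parser.parse_args([]).threshold)                     # 10.0
print(parser.parse_args(['--threshold', '25']).threshold)  # 25.0, a float
```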