path: root/scripts/simplebench/results_to_text.py
#!/usr/bin/env python3
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import math
import tabulate

# We want leading whitespace for difference row cells (see below)
tabulate.PRESERVE_WHITESPACE = True


def format_value(x, stdev):
    stdev_pr = stdev / x * 100
    if stdev_pr < 1.5:
        # don't care too much
        return f'{x:.2g}'
    else:
        return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
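
# Example outputs (hypothetical numbers, for illustration only):
#   format_value(0.032, 0.0003) -> '0.032'        (relative stdev < 1.5%, omitted)
#   format_value(0.032, 0.002)  -> '0.032 ± 7%'   (6.25% rounded up by math.ceil)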


def result_to_text(result):
    """Return text representation of bench_one() returned dict."""
    if 'average' in result:
        s = format_value(result['average'], result['stdev'])
        if 'n-failed' in result:
            s += '\n({} failed)'.format(result['n-failed'])
        return s
    else:
        return 'FAILED'
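
# Example (made-up values; the dict shape is inferred from the key lookups
# above rather than taken from bench_one() itself):
#   result_to_text({'average': 0.032, 'stdev': 0.002, 'n-failed': 1})
#       -> '0.032 ± 7%\n(1 failed)'
# A dict without an 'average' key renders as 'FAILED'.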


def results_dimension(results):
    dim = None
    for case in results['cases']:
        for env in results['envs']:
            res = results['tab'][case['id']][env['id']]
            if dim is None:
                dim = res['dimension']
            else:
                assert dim == res['dimension']

    assert dim in ('iops', 'seconds')

    return dim
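
# For reference, a minimal results dict that the functions below can consume.
# The shape is inferred from the dictionary accesses in this file; the ids
# and numbers are hypothetical:
#
#   {
#       'envs':  [{'id': 'master'}, {'id': 'fix'}],
#       'cases': [{'id': 'case-1'}],
#       'tab': {
#           'case-1': {
#               'master': {'dimension': 'iops', 'average': 100, 'stdev': 2},
#               'fix':    {'dimension': 'iops', 'average': 110, 'stdev': 2},
#           },
#       },
#   }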


def results_to_text(results):
    """Return text representation of bench() returned dict."""
    n_columns = len(results['envs'])
    named_columns = n_columns > 2
    dim = results_dimension(results)
    tab = []

    if named_columns:
        # Environment columns are named A, B, ...
        tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])

    tab.append([''] + [c['id'] for c in results['envs']])

    for case in results['cases']:
        row = [case['id']]
        case_results = results['tab'][case['id']]
        for env in results['envs']:
            res = case_results[env['id']]
            row.append(result_to_text(res))
        tab.append(row)

        # Add row of difference between columns. For each column starting from
        # B we calculate difference with all previous columns.
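        # For instance (made-up numbers): with named columns A, B, C and
        # averages 100, 110 and 95, B's cell reads roughly ' A+10%' and C's
        # cell ' A-5%  B-14%' (the leading spaces come from the loop below).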
        row = ['', '']  # case name and first column
        for i in range(1, n_columns):
            cell = ''
            env = results['envs'][i]
            res = case_results[env['id']]

            if 'average' not in res:
                # Failed result
                row.append(cell)
                continue

            for j in range(0, i):
                env_j = results['envs'][j]
                res_j = case_results[env_j['id']]
                cell += ' '

                if 'average' not in res_j:
                    # Failed result
                    cell += '--'
                    continue

                col_j = tab[0][j + 1] if named_columns else ''
                diff_pr = round((res['average'] - res_j['average']) /
                                res_j['average'] * 100)
                cell += f' {col_j}{diff_pr:+}%'
            row.append(cell)
        tab.append(row)

    return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
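
# With the hypothetical dict sketched above, the returned string starts with
# 'All results are in iops' followed by a tabulate-rendered grid: a header
# row of environment ids, one value row per case and, under it, the
# difference row built above.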


if __name__ == '__main__':
    import sys
    import json

    if len(sys.argv) < 2:
        print(f'USAGE: {sys.argv[0]} results.json')
        exit(1)

    with open(sys.argv[1]) as f:
        print(results_to_text(json.load(f)))