author    Peter Maydell <peter.maydell@linaro.org>  2020-03-20 13:54:23 +0000
committer Peter Maydell <peter.maydell@linaro.org>  2020-03-20 13:54:23 +0000
commit    3d0ac346032a1fa9afafcaedc979a99f670e077e (patch)
tree      33d7fba24953a958748bc5eb450c938766d340a0
parent    226cd20706e20264c176f8edbaf17d7c9b7ade4a (diff)
parent    f4abfc6cb037da951e7977a67171f361fc6d21d7 (diff)
Merge remote-tracking branch 'remotes/ehabkost/tags/python-next-pull-request' into staging
Python queue for 5.0 soft freeze

* Add scripts/simplebench (Vladimir Sementsov-Ogievskiy)

# gpg: Signature made Wed 18 Mar 2020 01:11:49 GMT
# gpg: using RSA key 5A322FD5ABC4D3DBACCFD1AA2807936F984DC5A6
# gpg: issuer "ehabkost@redhat.com"
# gpg: Good signature from "Eduardo Habkost <ehabkost@redhat.com>" [full]
# Primary key fingerprint: 5A32 2FD5 ABC4 D3DB ACCF D1AA 2807 936F 984D C5A6

* remotes/ehabkost/tags/python-next-pull-request:
  MAINTAINERS: add simplebench
  scripts/simplebench: add example usage of simplebench
  scripts/simplebench: add qemu/bench_block_job.py
  scripts/simplebench: add simplebench.py

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  MAINTAINERS                            |   5
-rw-r--r--  scripts/simplebench/bench-example.py   |  80
-rwxr-xr-x  scripts/simplebench/bench_block_job.py | 119
-rw-r--r--  scripts/simplebench/simplebench.py     | 128
4 files changed, 332 insertions(+), 0 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index a8d54f0..73c8d52 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2147,6 +2147,11 @@ F: python/qemu/*py
F: scripts/*.py
F: tests/*.py
+Benchmark util
+M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+S: Maintained
+F: scripts/simplebench/
+
QAPI
M: Markus Armbruster <armbru@redhat.com>
M: Michael Roth <mdroth@linux.vnet.ibm.com>
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
new file mode 100644
index 0000000..c642a5b
--- /dev/null
+++ b/scripts/simplebench/bench-example.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+#
+# Benchmark example
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import simplebench
+from bench_block_job import bench_block_copy, drv_file, drv_nbd
+
+
+def bench_func(env, case):
+    """Handle one "cell" of the benchmark table."""
+    return bench_block_copy(env['qemu_binary'], env['cmd'],
+                            case['source'], case['target'])
+
+
+# Set the following five variables to real values to turn this example into
+# an actual benchmark.
+ssd_source = '/path-to-raw-source-image-at-ssd'
+ssd_target = '/path-to-raw-target-image-at-ssd'
+hdd_target = '/path-to-raw-target-image-at-hdd'
+nbd_ip = 'nbd-ip-addr'
+nbd_port = 'nbd-port-number'
+
+# Test cases are the "rows" of the resulting benchmark table; 'id' is the
+# caption of the row, and the other fields are consumed by bench_func.
+test_cases = [
+    {
+        'id': 'ssd -> ssd',
+        'source': drv_file(ssd_source),
+        'target': drv_file(ssd_target)
+    },
+    {
+        'id': 'ssd -> hdd',
+        'source': drv_file(ssd_source),
+        'target': drv_file(hdd_target)
+    },
+    {
+        'id': 'ssd -> nbd',
+        'source': drv_file(ssd_source),
+        'target': drv_nbd(nbd_ip, nbd_port)
+    },
+]
+
+# Test environments are the "columns" of the table; 'id' is the caption of
+# the column, and the other fields are consumed by bench_func.
+test_envs = [
+    {
+        'id': 'backup-1',
+        'cmd': 'blockdev-backup',
+        'qemu_binary': '/path-to-qemu-binary-1'
+    },
+    {
+        'id': 'backup-2',
+        'cmd': 'blockdev-backup',
+        'qemu_binary': '/path-to-qemu-binary-2'
+    },
+    {
+        'id': 'mirror',
+        'cmd': 'blockdev-mirror',
+        'qemu_binary': '/path-to-qemu-binary-1'
+    }
+]
+
+result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
+print(simplebench.ascii(result))
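
To see how the pieces above fit together without QEMU or prepared images, here is a minimal sketch (not part of the commit) that drives simplebench with a stub in place of bench_block_copy. The stub function and its sleep-based timings are hypothetical, and it assumes simplebench.py is importable from the current directory:

import time

import simplebench

def stub_bench_func(env, case):
    # Hypothetical stand-in for bench_block_copy(): sleep briefly and report
    # the elapsed time in the {'seconds': float} shape simplebench expects.
    start = time.time()
    time.sleep(0.1 * case['weight'] * env['factor'])
    return {'seconds': time.time() - start}

test_cases = [{'id': 'light', 'weight': 1}, {'id': 'heavy', 'weight': 3}]
test_envs = [{'id': 'env-1', 'factor': 1}, {'id': 'env-2', 'factor': 2}]

result = simplebench.bench(stub_bench_func, test_envs, test_cases, count=3)
print(simplebench.ascii(result))  # ascii() needs the 'tabulate' package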
diff --git a/scripts/simplebench/bench_block_job.py b/scripts/simplebench/bench_block_job.py
new file mode 100755
index 0000000..9808d69
--- /dev/null
+++ b/scripts/simplebench/bench_block_job.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+#
+# Benchmark block jobs
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import os
+import socket
+import json
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
+from qemu.machine import QEMUMachine
+from qemu.qmp import QMPConnectError
+
+
+def bench_block_job(cmd, cmd_args, qemu_args):
+    """Benchmark a block job
+
+    cmd       -- QMP command that starts the block job (e.g. blockdev-backup)
+    cmd_args  -- dict of QMP command arguments
+    qemu_args -- list of QEMU command line arguments, including the path to
+                 the QEMU binary
+
+    Returns {'seconds': float} on success and {'error': str} on failure; the
+    dict may contain an additional 'vm-log' field. The return value is
+    compatible with the simplebench library.
+    """
+
+    vm = QEMUMachine(qemu_args[0], args=qemu_args[1:])
+
+    try:
+        vm.launch()
+    except OSError as e:
+        return {'error': 'popen failed: ' + str(e)}
+    except (QMPConnectError, socket.timeout):
+        return {'error': 'qemu failed: ' + str(vm.get_log())}
+
+    try:
+        res = vm.qmp(cmd, **cmd_args)
+        if res != {'return': {}}:
+            vm.shutdown()
+            return {'error': '"{}" command failed: {}'.format(cmd, str(res))}
+
+        e = vm.event_wait('JOB_STATUS_CHANGE')
+        assert e['data']['status'] == 'created'
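+        # A QMP timestamp is {'seconds': ..., 'microseconds': ...}; fold it
+        # into a single microsecond count.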
+        start_us = e['timestamp']['seconds'] * 1000000 + \
+            e['timestamp']['microseconds']
+
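+        # timeout=True is forwarded to the QMP pull_event() 'wait' argument,
+        # where a bool means "block until an event arrives" with no timeout.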
+        e = vm.events_wait((('BLOCK_JOB_READY', None),
+                            ('BLOCK_JOB_COMPLETED', None),
+                            ('BLOCK_JOB_FAILED', None)), timeout=True)
+        if e['event'] not in ('BLOCK_JOB_READY', 'BLOCK_JOB_COMPLETED'):
+            vm.shutdown()
+            return {'error': 'block-job failed: ' + str(e),
+                    'vm-log': vm.get_log()}
+        end_us = e['timestamp']['seconds'] * 1000000 + \
+            e['timestamp']['microseconds']
+    finally:
+        vm.shutdown()
+
+    return {'seconds': (end_us - start_us) / 1000000.0}
+
+
+# Bench backup or mirror
+def bench_block_copy(qemu_binary, cmd, source, target):
+    """Helper to run bench_block_job() for mirror or backup"""
+    assert cmd in ('blockdev-backup', 'blockdev-mirror')
+
+    source['node-name'] = 'source'
+    target['node-name'] = 'target'
+
+    return bench_block_job(cmd,
+                           {'job-id': 'job0', 'device': 'source',
+                            'target': 'target', 'sync': 'full'},
+                           [qemu_binary,
+                            '-blockdev', json.dumps(source),
+                            '-blockdev', json.dumps(target)])
+
+
+def drv_file(filename):
+    return {'driver': 'file', 'filename': filename,
+            'cache': {'direct': True}, 'aio': 'native'}
+
+
+def drv_nbd(host, port):
+    return {'driver': 'nbd',
+            'server': {'type': 'inet', 'host': host, 'port': port}}
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 4:
+        print('USAGE: {} <qmp block-job command name> '
+              '<json string of arguments for the command> '
+              '<qemu binary path and arguments>'.format(sys.argv[0]))
+        sys.exit(1)
+
+    res = bench_block_job(sys.argv[1], json.loads(sys.argv[2]), sys.argv[3:])
+    if 'seconds' in res:
+        print('{:.2f}'.format(res['seconds']))
+    else:
+        print(res)
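
As a usage sketch (not from the commit), bench_block_job() can also be driven directly from Python rather than via the CLI entry point above; the QEMU binary and image paths are placeholders:

import json
from bench_block_job import bench_block_job, drv_file

# Build the two -blockdev specs; the node names must match the QMP arguments.
src = drv_file('/path/to/src.raw')
src['node-name'] = 'source'
dst = drv_file('/path/to/dst.raw')
dst['node-name'] = 'target'

res = bench_block_job('blockdev-backup',
                      {'job-id': 'job0', 'device': 'source',
                       'target': 'target', 'sync': 'full'},
                      ['/path/to/qemu-system-x86_64',
                       '-blockdev', json.dumps(src),
                       '-blockdev', json.dumps(dst)])
print(res)  # {'seconds': ...} on success, {'error': ...} on failure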
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
new file mode 100644
index 0000000..59e7314
--- /dev/null
+++ b/scripts/simplebench/simplebench.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+#
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
+    """Benchmark one test case
+
+    test_func   -- benchmarking function with prototype test_func(env, case),
+                   which takes test_env and test_case as arguments and
+                   returns {'seconds': float} (the benchmark result) on
+                   success and {'error': str} on error. The returned dict may
+                   contain any additional fields.
+    test_env    -- test environment: opaque first argument for test_func
+    test_case   -- test case: opaque second argument for test_func
+    count       -- how many times to call test_func to compute the average
+    initial_run -- do an initial warm-up run whose result is not recorded
+
+    Returns a dict with the following fields:
+        'runs':     list of test_func results
+        'average':  average seconds per run (present only if at least one
+                    run succeeded)
+        'delta':    maximum delta between a test_func result and the average
+                    (present only if at least one run succeeded)
+        'n-failed': number of failed runs (present only if at least one run
+                    failed)
+    """
+    if initial_run:
+        print('  #initial run:')
+        print('   ', test_func(test_env, test_case))
+
+    runs = []
+    for i in range(count):
+        print('  #run {}'.format(i + 1))
+        res = test_func(test_env, test_case)
+        print('   ', res)
+        runs.append(res)
+
+    result = {'runs': runs}
+
+    succeeded = [r for r in runs if ('seconds' in r)]
+    if succeeded:
+        avg = sum(r['seconds'] for r in succeeded) / len(succeeded)
+        result['average'] = avg
+        result['delta'] = max(abs(r['seconds'] - avg) for r in succeeded)
+
+    if len(succeeded) < count:
+        result['n-failed'] = count - len(succeeded)
+
+    return result
+
+
+def ascii_one(result):
+    """Return an ASCII representation of a bench_one() result dict."""
+    if 'average' in result:
+        s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
+        if 'n-failed' in result:
+            s += '\n({} failed)'.format(result['n-failed'])
+        return s
+    else:
+        return 'FAILED'
+
+
+def bench(test_func, test_envs, test_cases, *args, **kwargs):
+    """Fill the benchmark table
+
+    test_func  -- benchmarking function, see bench_one() for a description
+    test_envs  -- list of test environments, see bench_one()
+    test_cases -- list of test cases, see bench_one()
+    args, kwargs -- additional arguments passed through to bench_one()
+
+    Returns a dict with the following fields:
+        'envs':  test_envs
+        'cases': test_cases
+        'tab':   two-level dict of results: tab[case_id][env_id] is the
+                 bench_one() result for that test case and environment
+                 (i.e. rows are test cases, columns are test environments)
+    """
+    tab = {}
+    results = {
+        'envs': test_envs,
+        'cases': test_cases,
+        'tab': tab
+    }
+    n = 1
+    n_tests = len(test_envs) * len(test_cases)
+    for env in test_envs:
+        for case in test_cases:
+            print('Testing {}/{}: {} :: {}'.format(n, n_tests,
+                                                   env['id'], case['id']))
+            if case['id'] not in tab:
+                tab[case['id']] = {}
+            tab[case['id']][env['id']] = bench_one(test_func, env, case,
+                                                   *args, **kwargs)
+            n += 1
+
+    print('Done')
+    return results
+
+
+def ascii(results):
+    """Return an ASCII representation of a bench() result dict."""
+    from tabulate import tabulate
+
+    tab = [[""] + [c['id'] for c in results['envs']]]
+    for case in results['cases']:
+        row = [case['id']]
+        for env in results['envs']:
+            row.append(ascii_one(results['tab'][case['id']][env['id']]))
+        tab.append(row)
+
+    return tabulate(tab)
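
To make the bench_one() contract concrete, a small illustration (not part of the commit) with a canned, partly failing test_func; the numbers are chosen by hand:

import simplebench

canned = iter([{'seconds': 1.0}, {'seconds': 3.0}, {'error': 'boom'}])

def canned_func(env, case):
    # Hypothetical test_func returning pre-baked results, one of them failing.
    return next(canned)

res = simplebench.bench_one(canned_func, {}, {}, count=3, initial_run=False)
# res['runs'] holds all three dicts; res['average'] == 2.0 (over the two
# successful runs), res['delta'] == 1.0 and res['n-failed'] == 1.
print(res)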