aboutsummaryrefslogtreecommitdiffstats
path: root/scripts/simplebench
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/simplebench')
-rwxr-xr-xscripts/simplebench/bench-backup.py228
-rw-r--r--scripts/simplebench/bench-example.py81
-rwxr-xr-xscripts/simplebench/bench_block_job.py159
-rwxr-xr-xscripts/simplebench/bench_prealloc.py132
-rwxr-xr-xscripts/simplebench/bench_write_req.py171
-rwxr-xr-xscripts/simplebench/img_bench_templater.py95
-rwxr-xr-xscripts/simplebench/results_to_text.py126
-rw-r--r--scripts/simplebench/simplebench.py140
-rw-r--r--scripts/simplebench/table_templater.py62
9 files changed, 1194 insertions, 0 deletions
diff --git a/scripts/simplebench/bench-backup.py b/scripts/simplebench/bench-backup.py
new file mode 100755
index 000000000..5a0675c59
--- /dev/null
+++ b/scripts/simplebench/bench-backup.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+#
+# Bench backup block-job
+#
+# Copyright (c) 2020 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import argparse
+import json
+
+import simplebench
+from results_to_text import results_to_text
+from bench_block_job import bench_block_copy, drv_file, drv_nbd, drv_qcow2
+
+
+def bench_func(env, case):
+ """ Handle one "cell" of benchmarking table. """
+ cmd_options = env['cmd-options'] if 'cmd-options' in env else {}
+ return bench_block_copy(env['qemu-binary'], env['cmd'],
+ cmd_options,
+ case['source'], case['target'])
+
+
+def bench(args):
+ test_cases = []
+
+ # paths with colon not supported, so we just split by ':'
+ dirs = dict(d.split(':') for d in args.dir)
+
+ nbd_drv = None
+ if args.nbd:
+ nbd = args.nbd.split(':')
+ host = nbd[0]
+ port = '10809' if len(nbd) == 1 else nbd[1]
+ nbd_drv = drv_nbd(host, port)
+
+ for t in args.test:
+ src, dst = t.split(':')
+
+ if src == 'nbd' and dst == 'nbd':
+ raise ValueError("Can't use 'nbd' label for both src and dst")
+
+ if (src == 'nbd' or dst == 'nbd') and not nbd_drv:
+ raise ValueError("'nbd' label used but --nbd is not given")
+
+ if src == 'nbd':
+ source = nbd_drv
+ elif args.qcow2_sources:
+ source = drv_qcow2(drv_file(dirs[src] + '/test-source.qcow2'))
+ else:
+ source = drv_file(dirs[src] + '/test-source')
+
+ if dst == 'nbd':
+ test_cases.append({'id': t, 'source': source, 'target': nbd_drv})
+ continue
+
+ if args.target_cache == 'both':
+ target_caches = ['direct', 'cached']
+ else:
+ target_caches = [args.target_cache]
+
+ for c in target_caches:
+ o_direct = c == 'direct'
+ fname = dirs[dst] + '/test-target'
+ if args.compressed:
+ fname += '.qcow2'
+ target = drv_file(fname, o_direct=o_direct)
+ if args.compressed:
+ target = drv_qcow2(target)
+
+ test_id = t
+ if args.target_cache == 'both':
+ test_id += f'({c})'
+
+ test_cases.append({'id': test_id, 'source': source,
+ 'target': target})
+
+ binaries = [] # list of (<label>, <path>, [<options>])
+ for i, q in enumerate(args.env):
+ name_path = q.split(':')
+ if len(name_path) == 1:
+ label = f'q{i}'
+ path_opts = name_path[0].split(',')
+ else:
+ assert len(name_path) == 2 # paths with colon not supported
+ label = name_path[0]
+ path_opts = name_path[1].split(',')
+
+ binaries.append((label, path_opts[0], path_opts[1:]))
+
+ test_envs = []
+
+ bin_paths = {}
+ for i, q in enumerate(args.env):
+ opts = q.split(',')
+ label_path = opts[0]
+ opts = opts[1:]
+
+ if ':' in label_path:
+ # path with colon inside is not supported
+ label, path = label_path.split(':')
+ bin_paths[label] = path
+ elif label_path in bin_paths:
+ label = label_path
+ path = bin_paths[label]
+ else:
+ path = label_path
+ label = f'q{i}'
+ bin_paths[label] = path
+
+ x_perf = {}
+ is_mirror = False
+ for opt in opts:
+ if opt == 'mirror':
+ is_mirror = True
+ elif opt == 'copy-range=on':
+ x_perf['use-copy-range'] = True
+ elif opt == 'copy-range=off':
+ x_perf['use-copy-range'] = False
+ elif opt.startswith('max-workers='):
+ x_perf['max-workers'] = int(opt.split('=')[1])
+
+ backup_options = {}
+ if x_perf:
+ backup_options['x-perf'] = x_perf
+
+ if args.compressed:
+ backup_options['compress'] = True
+
+ if is_mirror:
+ assert not x_perf
+ test_envs.append({
+ 'id': f'mirror({label})',
+ 'cmd': 'blockdev-mirror',
+ 'qemu-binary': path
+ })
+ else:
+ test_envs.append({
+ 'id': f'backup({label})\n' + '\n'.join(opts),
+ 'cmd': 'blockdev-backup',
+ 'cmd-options': backup_options,
+ 'qemu-binary': path
+ })
+
+ result = simplebench.bench(bench_func, test_envs, test_cases,
+ count=args.count, initial_run=args.initial_run,
+ drop_caches=args.drop_caches)
+ with open('results.json', 'w') as f:
+ json.dump(result, f, indent=4)
+ print(results_to_text(result))
+
+
+class ExtendAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, self.dest) or []
+ items.extend(values)
+ setattr(namespace, self.dest, items)
+
+
+if __name__ == '__main__':
+ p = argparse.ArgumentParser('Backup benchmark', epilog='''
+ENV format
+
+ (LABEL:PATH|LABEL|PATH)[,max-workers=N][,use-copy-range=(on|off)][,mirror]
+
+ LABEL short name for the binary
+ PATH path to the binary
+ max-workers set x-perf.max-workers of backup job
+ use-copy-range set x-perf.use-copy-range of backup job
+ mirror use mirror job instead of backup''',
+ formatter_class=argparse.RawTextHelpFormatter)
+ p.add_argument('--env', nargs='+', help='''\
+Qemu binaries with labels and options, see below
+"ENV format" section''',
+ action=ExtendAction)
+ p.add_argument('--dir', nargs='+', help='''\
+Directories, each containing "test-source" and/or
+"test-target" files, raw images to be used in
+benchmarking. File path with label, like
+label:/path/to/directory''',
+ action=ExtendAction)
+ p.add_argument('--nbd', help='''\
+host:port for remote NBD image, (or just host, for
+default port 10809). Use it in tests, label is "nbd"
+(but you cannot create test nbd:nbd).''')
+ p.add_argument('--test', nargs='+', help='''\
+Tests, in form source-dir-label:target-dir-label''',
+ action=ExtendAction)
+ p.add_argument('--compressed', help='''\
+Use compressed backup. It automatically means
+creating a qcow2 target with lazy_refcounts
+for each test run''', action='store_true')
+ p.add_argument('--qcow2-sources', help='''\
+Use test-source.qcow2 images as sources instead of
+test-source raw images''', action='store_true')
+ p.add_argument('--target-cache', help='''\
+Setup cache for target nodes. Options:
+ direct: default, use O_DIRECT and aio=native
+ cached: use system cache (Qemu default) and aio=threads (Qemu default)
+ both: generate two test cases for each src:dst pair''',
+ default='direct', choices=('direct', 'cached', 'both'))
+
+ p.add_argument('--count', type=int, default=3, help='''\
+Number of test runs per table cell''')
+
+ # BooleanOptionalAction helps to support --no-initial-run option
+ p.add_argument('--initial-run', action=argparse.BooleanOptionalAction,
+ help='''\
+Do additional initial run per cell which doesn't count in result,
+default true''')
+
+ p.add_argument('--drop-caches', action='store_true', help='''\
+Do "sync; echo 3 > /proc/sys/vm/drop_caches" before each test run''')
+
+ bench(p.parse_args())
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
new file mode 100644
index 000000000..4864435f3
--- /dev/null
+++ b/scripts/simplebench/bench-example.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+#
+# Benchmark example
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import simplebench
+from results_to_text import results_to_text
+from bench_block_job import bench_block_copy, drv_file, drv_nbd
+
+
+def bench_func(env, case):
+ """ Handle one "cell" of benchmarking table. """
+    return bench_block_copy(env['qemu_binary'], env['cmd'], {},
+                            case['source'], case['target'])
+
+
+# You may set the following five variables to correct values, to turn this
+# example to real benchmark.
+ssd_source = '/path-to-raw-source-image-at-ssd'
+ssd_target = '/path-to-raw-target-image-at-ssd'
+hdd_target = '/path-to-raw-source-image-at-hdd'
+nbd_ip = 'nbd-ip-addr'
+nbd_port = 'nbd-port-number'
+
+# Test-cases are "rows" in benchmark resulting table, 'id' is a caption for
+# the row, other fields are handled by bench_func.
+test_cases = [
+ {
+ 'id': 'ssd -> ssd',
+ 'source': drv_file(ssd_source),
+ 'target': drv_file(ssd_target)
+ },
+ {
+ 'id': 'ssd -> hdd',
+ 'source': drv_file(ssd_source),
+ 'target': drv_file(hdd_target)
+ },
+ {
+ 'id': 'ssd -> nbd',
+ 'source': drv_file(ssd_source),
+ 'target': drv_nbd(nbd_ip, nbd_port)
+ },
+]
+
+# Test-envs are "columns" in benchmark resulting table, 'id' is a caption for
+# the column, other fields are handled by bench_func.
+test_envs = [
+ {
+ 'id': 'backup-1',
+ 'cmd': 'blockdev-backup',
+ 'qemu_binary': '/path-to-qemu-binary-1'
+ },
+ {
+ 'id': 'backup-2',
+ 'cmd': 'blockdev-backup',
+ 'qemu_binary': '/path-to-qemu-binary-2'
+ },
+ {
+ 'id': 'mirror',
+ 'cmd': 'blockdev-mirror',
+ 'qemu_binary': '/path-to-qemu-binary-1'
+ }
+]
+
+result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
+print(results_to_text(result))
diff --git a/scripts/simplebench/bench_block_job.py b/scripts/simplebench/bench_block_job.py
new file mode 100755
index 000000000..a403c35b0
--- /dev/null
+++ b/scripts/simplebench/bench_block_job.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+#
+# Benchmark block jobs
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import os
+import subprocess
+import socket
+import json
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
+from qemu.machine import QEMUMachine
+from qemu.qmp import QMPConnectError
+from qemu.aqmp import ConnectError
+
+
+def bench_block_job(cmd, cmd_args, qemu_args):
+ """Benchmark block-job
+
+ cmd -- qmp command to run block-job (like blockdev-backup)
+ cmd_args -- dict of qmp command arguments
+ qemu_args -- list of Qemu command line arguments, including path to Qemu
+ binary
+
+ Returns {'seconds': int} on success and {'error': str} on failure, dict may
+ contain addional 'vm-log' field. Return value is compatible with
+ simplebench lib.
+ """
+
+ vm = QEMUMachine(qemu_args[0], args=qemu_args[1:])
+
+ try:
+ vm.launch()
+ except OSError as e:
+ return {'error': 'popen failed: ' + str(e)}
+ except (QMPConnectError, ConnectError, socket.timeout):
+ return {'error': 'qemu failed: ' + str(vm.get_log())}
+
+ try:
+ res = vm.qmp(cmd, **cmd_args)
+ if res != {'return': {}}:
+ vm.shutdown()
+ return {'error': '"{}" command failed: {}'.format(cmd, str(res))}
+
+ e = vm.event_wait('JOB_STATUS_CHANGE')
+ assert e['data']['status'] == 'created'
+ start_ms = e['timestamp']['seconds'] * 1000000 + \
+ e['timestamp']['microseconds']
+
+ e = vm.events_wait((('BLOCK_JOB_READY', None),
+ ('BLOCK_JOB_COMPLETED', None),
+ ('BLOCK_JOB_FAILED', None)), timeout=True)
+ if e['event'] not in ('BLOCK_JOB_READY', 'BLOCK_JOB_COMPLETED'):
+ vm.shutdown()
+ return {'error': 'block-job failed: ' + str(e),
+ 'vm-log': vm.get_log()}
+ if 'error' in e['data']:
+ vm.shutdown()
+ return {'error': 'block-job failed: ' + e['data']['error'],
+ 'vm-log': vm.get_log()}
+ end_ms = e['timestamp']['seconds'] * 1000000 + \
+ e['timestamp']['microseconds']
+ finally:
+ vm.shutdown()
+
+ return {'seconds': (end_ms - start_ms) / 1000000.0}
+
+
+def get_image_size(path):
+ out = subprocess.run(['qemu-img', 'info', '--out=json', path],
+ stdout=subprocess.PIPE, check=True).stdout
+ return json.loads(out)['virtual-size']
+
+
+def get_blockdev_size(obj):
+ img = obj['filename'] if 'filename' in obj else obj['file']['filename']
+ return get_image_size(img)
+
+
+# Bench backup or mirror
+def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
+ """Helper to run bench_block_job() for mirror or backup"""
+ assert cmd in ('blockdev-backup', 'blockdev-mirror')
+
+ if target['driver'] == 'qcow2':
+ try:
+ os.remove(target['file']['filename'])
+ except OSError:
+ pass
+
+ subprocess.run(['qemu-img', 'create', '-f', 'qcow2',
+ target['file']['filename'],
+ str(get_blockdev_size(source))],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL, check=True)
+
+ source['node-name'] = 'source'
+ target['node-name'] = 'target'
+
+ cmd_options['job-id'] = 'job0'
+ cmd_options['device'] = 'source'
+ cmd_options['target'] = 'target'
+ cmd_options['sync'] = 'full'
+
+ return bench_block_job(cmd, cmd_options,
+ [qemu_binary,
+ '-blockdev', json.dumps(source),
+ '-blockdev', json.dumps(target)])
+
+
+def drv_file(filename, o_direct=True):
+ node = {'driver': 'file', 'filename': filename}
+ if o_direct:
+ node['cache'] = {'direct': True}
+ node['aio'] = 'native'
+
+ return node
+
+
+def drv_nbd(host, port):
+ return {'driver': 'nbd',
+ 'server': {'type': 'inet', 'host': host, 'port': port}}
+
+
+def drv_qcow2(file):
+ return {'driver': 'qcow2', 'file': file}
+
+
+if __name__ == '__main__':
+ import sys
+
+ if len(sys.argv) < 4:
+ print('USAGE: {} <qmp block-job command name> '
+ '<json string of arguments for the command> '
+ '<qemu binary path and arguments>'.format(sys.argv[0]))
+ exit(1)
+
+ res = bench_block_job(sys.argv[1], json.loads(sys.argv[2]), sys.argv[3:])
+ if 'seconds' in res:
+ print('{:.2f}'.format(res['seconds']))
+ else:
+ print(res)
diff --git a/scripts/simplebench/bench_prealloc.py b/scripts/simplebench/bench_prealloc.py
new file mode 100755
index 000000000..85f588c59
--- /dev/null
+++ b/scripts/simplebench/bench_prealloc.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+#
+# Benchmark preallocate filter
+#
+# Copyright (c) 2020 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import os
+import subprocess
+import re
+import json
+
+import simplebench
+from results_to_text import results_to_text
+
+
+def qemu_img_bench(args):
+ p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+
+ if p.returncode == 0:
+ try:
+ m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
+ return {'seconds': float(m.group(1))}
+ except Exception:
+ return {'error': f'failed to parse qemu-img output: {p.stdout}'}
+ else:
+ return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
+
+
+def bench_func(env, case):
+ fname = f"{case['dir']}/prealloc-test.qcow2"
+ try:
+ os.remove(fname)
+ except OSError:
+ pass
+
+ subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
+ '16G'], stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL, check=True)
+
+ args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
+ '-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
+ if env['prealloc']:
+ args += ['--image-opts',
+ 'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
+ f'file.file.filename={fname}']
+ else:
+ args += ['-f', 'qcow2', fname]
+
+ return qemu_img_bench(args)
+
+
+def auto_count_bench_func(env, case):
+ case['count'] = 100
+ while True:
+ res = bench_func(env, case)
+ if 'error' in res:
+ return res
+
+ if res['seconds'] >= 1:
+ break
+
+ case['count'] *= 10
+
+ if res['seconds'] < 5:
+ case['count'] = round(case['count'] * 5 / res['seconds'])
+ res = bench_func(env, case)
+ if 'error' in res:
+ return res
+
+ res['iops'] = case['count'] / res['seconds']
+ return res
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
+ 'DISK_NAME:DIR_PATH ...')
+ exit(1)
+
+ qemu_img = sys.argv[1]
+
+ envs = [
+ {
+ 'id': 'no-prealloc',
+ 'qemu-img-binary': qemu_img,
+ 'prealloc': False
+ },
+ {
+ 'id': 'prealloc',
+ 'qemu-img-binary': qemu_img,
+ 'prealloc': True
+ }
+ ]
+
+ aligned_cases = []
+ unaligned_cases = []
+
+ for disk in sys.argv[2:]:
+ name, path = disk.split(':')
+ aligned_cases.append({
+ 'id': f'{name}, aligned sequential 16k',
+ 'block-size': '16k',
+ 'dir': path
+ })
+ unaligned_cases.append({
+ 'id': f'{name}, unaligned sequential 64k',
+            'block-size': '64k',
+ 'dir': path
+ })
+
+ result = simplebench.bench(auto_count_bench_func, envs,
+ aligned_cases + unaligned_cases, count=5)
+ print(results_to_text(result))
+ with open('results.json', 'w') as f:
+ json.dump(result, f, indent=4)
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
new file mode 100755
index 000000000..da601ea2f
--- /dev/null
+++ b/scripts/simplebench/bench_write_req.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+#
+# Test to compare performance of write requests for two qemu-img binary files.
+#
+# The idea of the test comes from intention to check the benefit of c8bb23cbdbe
+# "qcow2: skip writing zero buffers to empty COW areas".
+#
+# Copyright (c) 2020 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import os
+import subprocess
+import simplebench
+from results_to_text import results_to_text
+
+
+def bench_func(env, case):
+ """ Handle one "cell" of benchmarking table. """
+ return bench_write_req(env['qemu_img'], env['image_name'],
+ case['block_size'], case['block_offset'],
+ case['cluster_size'])
+
+
+def qemu_img_pipe(*args):
+ '''Run qemu-img and return its output'''
+ subp = subprocess.Popen(list(args),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ exitcode = subp.wait()
+ if exitcode < 0:
+ sys.stderr.write('qemu-img received signal %i: %s\n'
+ % (-exitcode, ' '.join(list(args))))
+ return subp.communicate()[0]
+
+
+def bench_write_req(qemu_img, image_name, block_size, block_offset,
+ cluster_size):
+ """Benchmark write requests
+
+ The function creates a QCOW2 image with the given path/name. Then it runs
+ the 'qemu-img bench' command and makes series of write requests on the
+ image clusters. Finally, it returns the total time of the write operations
+ on the disk.
+
+ qemu_img -- path to qemu_img executable file
+ image_name -- QCOW2 image name to create
+ block_size -- size of a block to write to clusters
+ block_offset -- offset of the block in clusters
+ cluster_size -- size of the image cluster
+
+ Returns {'seconds': int} on success and {'error': str} on failure.
+ Return value is compatible with simplebench lib.
+ """
+
+ if not os.path.isfile(qemu_img):
+ print(f'File not found: {qemu_img}')
+ sys.exit(1)
+
+ image_dir = os.path.dirname(os.path.abspath(image_name))
+ if not os.path.isdir(image_dir):
+ print(f'Path not found: {image_name}')
+ sys.exit(1)
+
+ image_size = 1024 * 1024 * 1024
+
+ args_create = [qemu_img, 'create', '-f', 'qcow2', '-o',
+ f'cluster_size={cluster_size}',
+ image_name, str(image_size)]
+
+ count = int(image_size / cluster_size) - 1
+ step = str(cluster_size)
+
+ args_bench = [qemu_img, 'bench', '-w', '-n', '-t', 'none', '-c',
+ str(count), '-s', f'{block_size}', '-o', str(block_offset),
+ '-S', step, '-f', 'qcow2', image_name]
+
+ try:
+ qemu_img_pipe(*args_create)
+ except OSError as e:
+ os.remove(image_name)
+ return {'error': 'qemu_img create failed: ' + str(e)}
+
+ try:
+ ret = qemu_img_pipe(*args_bench)
+ except OSError as e:
+ os.remove(image_name)
+ return {'error': 'qemu_img bench failed: ' + str(e)}
+
+ os.remove(image_name)
+
+ if 'seconds' in ret:
+ ret_list = ret.split()
+ index = ret_list.index('seconds.')
+ return {'seconds': float(ret_list[index-1])}
+ else:
+ return {'error': 'qemu_img bench failed: ' + ret}
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) < 4:
+ program = os.path.basename(sys.argv[0])
+ print(f'USAGE: {program} <path to qemu-img binary file> '
+ '<path to another qemu-img to compare performance with> '
+ '<full or relative name for QCOW2 image to create>')
+ exit(1)
+
+ # Test-cases are "rows" in benchmark resulting table, 'id' is a caption
+ # for the row, other fields are handled by bench_func.
+ test_cases = [
+ {
+ 'id': '<cluster front>',
+ 'block_size': 4096,
+ 'block_offset': 0,
+ 'cluster_size': 1048576
+ },
+ {
+ 'id': '<cluster middle>',
+ 'block_size': 4096,
+ 'block_offset': 524288,
+ 'cluster_size': 1048576
+ },
+ {
+ 'id': '<cross cluster>',
+ 'block_size': 1048576,
+ 'block_offset': 4096,
+ 'cluster_size': 1048576
+ },
+ {
+ 'id': '<cluster 64K>',
+ 'block_size': 4096,
+ 'block_offset': 0,
+ 'cluster_size': 65536
+ },
+ ]
+
+    # Test-envs are "columns" in benchmark resulting table, 'id' is a caption
+ # for the column, other fields are handled by bench_func.
+ # Set the paths below to desired values
+ test_envs = [
+ {
+ 'id': '<qemu-img binary 1>',
+ 'qemu_img': f'{sys.argv[1]}',
+ 'image_name': f'{sys.argv[3]}'
+ },
+ {
+ 'id': '<qemu-img binary 2>',
+ 'qemu_img': f'{sys.argv[2]}',
+ 'image_name': f'{sys.argv[3]}'
+ },
+ ]
+
+ result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
+ initial_run=False)
+ print(results_to_text(result))
diff --git a/scripts/simplebench/img_bench_templater.py b/scripts/simplebench/img_bench_templater.py
new file mode 100755
index 000000000..f8e1540ad
--- /dev/null
+++ b/scripts/simplebench/img_bench_templater.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Process img-bench test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import subprocess
+import re
+import json
+
+import simplebench
+from results_to_text import results_to_text
+from table_templater import Templater
+
+
+def bench_func(env, case):
+ test = templater.gen(env['data'], case['data'])
+
+ p = subprocess.run(test, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, universal_newlines=True)
+
+ if p.returncode == 0:
+ try:
+ m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
+ return {'seconds': float(m.group(1))}
+ except Exception:
+ return {'error': f'failed to parse qemu-img output: {p.stdout}'}
+ else:
+ return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ print("""
+Usage: img_bench_templater.py < path/to/test-template.sh
+
+This script generates performance tests from a test template (example below),
+runs them, and displays the results in a table. The template is read from
+stdin. It must be written in bash and end with a `qemu-img bench` invocation
+(whose result is parsed to get the test instance’s result).
+
+Use the following syntax in the template to create the various different test
+instances:
+
+ column templating: {var1|var2|...} - test will use different values in
+ different columns. You may use several {} constructions in the test, in this
+ case product of all choice-sets will be used.
+
+ row templating: [var1|var2|...] - similar thing to define rows (test-cases)
+
+Test template example:
+
+Assume you want to compare two qemu-img binaries, called qemu-img-old and
+qemu-img-new in your build directory in two test-cases with 4K writes and 64K
+writes. The template may look like this:
+
+qemu_img=/path/to/qemu/build/qemu-img-{old|new}
+$qemu_img create -f qcow2 /ssd/x.qcow2 1G
+$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2
+
+When passing this to stdin of img_bench_templater.py, the resulting comparison
+table will contain two columns (for two binaries) and two rows (for two
+test-cases).
+
+In addition to displaying the results, script also stores results in JSON
+format into results.json file in current directory.
+""")
+ sys.exit()
+
+ templater = Templater(sys.stdin.read())
+
+ envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns]
+ cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows]
+
+ result = simplebench.bench(bench_func, envs, cases, count=5,
+ initial_run=False)
+ print(results_to_text(result))
+ with open('results.json', 'w') as f:
+ json.dump(result, f, indent=4)
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
new file mode 100755
index 000000000..d561e5e2d
--- /dev/null
+++ b/scripts/simplebench/results_to_text.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import math
+import tabulate
+
+# We want leading whitespace for difference row cells (see below)
+tabulate.PRESERVE_WHITESPACE = True
+
+
+def format_value(x, stdev):
+ stdev_pr = stdev / x * 100
+ if stdev_pr < 1.5:
+ # don't care too much
+ return f'{x:.2g}'
+ else:
+ return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
+
+
+def result_to_text(result):
+ """Return text representation of bench_one() returned dict."""
+ if 'average' in result:
+ s = format_value(result['average'], result['stdev'])
+ if 'n-failed' in result:
+ s += '\n({} failed)'.format(result['n-failed'])
+ return s
+ else:
+ return 'FAILED'
+
+
+def results_dimension(results):
+ dim = None
+ for case in results['cases']:
+ for env in results['envs']:
+ res = results['tab'][case['id']][env['id']]
+ if dim is None:
+ dim = res['dimension']
+ else:
+ assert dim == res['dimension']
+
+ assert dim in ('iops', 'seconds')
+
+ return dim
+
+
+def results_to_text(results):
+ """Return text representation of bench() returned dict."""
+ n_columns = len(results['envs'])
+ named_columns = n_columns > 2
+ dim = results_dimension(results)
+ tab = []
+
+ if named_columns:
+ # Environment columns are named A, B, ...
+ tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
+
+ tab.append([''] + [c['id'] for c in results['envs']])
+
+ for case in results['cases']:
+ row = [case['id']]
+ case_results = results['tab'][case['id']]
+ for env in results['envs']:
+ res = case_results[env['id']]
+ row.append(result_to_text(res))
+ tab.append(row)
+
+        # Add row of difference between columns. For each column starting from
+        # B we calculate difference with all previous columns.
+        row = ['', '']  # case name and first column
+        for i in range(1, n_columns):
+            cell = ''
+            env = results['envs'][i]
+            res = case_results[env['id']]
+
+            if 'average' not in res:
+                # Failed result
+                row.append(cell)
+                continue
+
+            for j in range(0, i):
+                env_j = results['envs'][j]
+                res_j = case_results[env_j['id']]
+                cell += ' '
+
+                if 'average' not in res_j:
+                    # Failed result
+                    cell += '--'
+                    continue
+
+                col_j = tab[0][j + 1] if named_columns else ''
+                diff_pr = round((res['average'] - res_j['average']) /
+                                res_j['average'] * 100)
+                cell += f' {col_j}{diff_pr:+}%'
+            row.append(cell)
+        tab.append(row)
+
+ return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
+
+
+if __name__ == '__main__':
+ import sys
+ import json
+
+ if len(sys.argv) < 2:
+ print(f'USAGE: {sys.argv[0]} results.json')
+ exit(1)
+
+ with open(sys.argv[1]) as f:
+ print(results_to_text(json.load(f)))
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
new file mode 100644
index 000000000..8efca2af9
--- /dev/null
+++ b/scripts/simplebench/simplebench.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+#
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import statistics
+import subprocess
+import time
+
+
def do_drop_caches():
    """Sync dirty pages to disk and drop kernel caches.

    Linux-specific and normally requires root: writes 3 to
    /proc/sys/vm/drop_caches (drop page cache, dentries and inodes) so
    each benchmark run starts from cold caches.  check=True raises
    subprocess.CalledProcessError if the shell pipeline fails.
    """
    subprocess.run('sync; echo 3 > /proc/sys/vm/drop_caches', shell=True,
                   check=True)
+
+
def bench_one(test_func, test_env, test_case, count=5, initial_run=True,
              slow_limit=100, drop_caches=False):
    """Benchmark one test-case

    test_func -- benchmarking function with prototype
                 test_func(env, case), which takes test_env and test_case
                 arguments and on success returns dict with 'seconds' or
                 'iops' (or both) fields, specifying the benchmark result.
                 If both 'iops' and 'seconds' provided, the 'iops' is
                 considered the main, and 'seconds' is just an additional
                 info. On failure test_func should return {'error': str}.
                 Returned dict may contain any other additional fields.
    test_env -- test environment - opaque first argument for test_func
    test_case -- test case - opaque second argument for test_func
    count -- how many times to call test_func, to calculate average
    initial_run -- do initial run of test_func, which don't get into result
    slow_limit -- stop at slow run (that exceeds the slow_limit by seconds).
                  (initial run is not measured)
    drop_caches -- drop caches before each run

    Returns dict with the following fields:
        'runs': list of test_func results
        'dimension': dimension of results, may be 'seconds' or 'iops'
        'average': average value (iops or seconds) per run (exists only if
                   at least one run succeeded)
        'stdev': standard deviation of results
                 (exists only if at least one run succeeded)
        'n-failed': number of failed runs (exists only if at least one run
                    failed)
    """
    if initial_run:
        print('  #initial run:')
        # Honor the drop_caches flag: previously do_drop_caches() was
        # called unconditionally, ignoring the parameter (and requiring
        # root even when the caller did not ask for cache dropping).
        if drop_caches:
            do_drop_caches()
        print('   ', test_func(test_env, test_case))

    runs = []
    for i in range(count):
        t = time.time()

        print('  #run {}'.format(i+1))
        if drop_caches:
            do_drop_caches()
        res = test_func(test_env, test_case)
        print('   ', res)
        runs.append(res)

        if time.time() - t > slow_limit:
            print('    - run is too slow, stop here')
            break

    # The loop may stop early on a slow run: only the runs actually made
    # count towards the average and the failure statistics.
    count = len(runs)

    result = {'runs': runs}

    # A run succeeded if it reported at least one benchmark dimension.
    succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
    if succeeded:
        if 'iops' in succeeded[0]:
            assert all('iops' in r for r in succeeded)
            dim = 'iops'
        else:
            assert all('seconds' in r for r in succeeded)
            assert all('iops' not in r for r in succeeded)
            dim = 'seconds'
        result['dimension'] = dim
        result['average'] = statistics.mean(r[dim] for r in succeeded)
        if len(succeeded) == 1:
            # statistics.stdev needs at least two data points
            result['stdev'] = 0
        else:
            result['stdev'] = statistics.stdev(r[dim] for r in succeeded)

    if len(succeeded) < count:
        result['n-failed'] = count - len(succeeded)

    return result
+
+
def bench(test_func, test_envs, test_cases, *args, **vargs):
    """Fill benchmark table

    test_func -- benchmarking function, see bench_one for description
    test_envs -- list of test environments, see bench_one
    test_cases -- list of test cases, see bench_one
    args, vargs -- additional arguments for bench_one

    Returns dict with the following fields:
        'envs': test_envs
        'cases': test_cases
        'tab': two-level dict, where tab[case_id][env_id] is the bench_one
               result for that test case in that environment (i.e., rows
               are test cases and columns are test environments)
    """
    tab = {}
    results = {
        'envs': test_envs,
        'cases': test_cases,
        'tab': tab
    }
    n = 1
    n_tests = len(test_envs) * len(test_cases)
    for env in test_envs:
        for case in test_cases:
            print('Testing {}/{}: {} :: {}'.format(n, n_tests,
                                                   env['id'], case['id']))
            # setdefault creates the case row on first visit; later
            # environments just add their column to it.
            tab.setdefault(case['id'], {})[env['id']] = \
                bench_one(test_func, env, case, *args, **vargs)
            n += 1

    print('Done')
    return results
diff --git a/scripts/simplebench/table_templater.py b/scripts/simplebench/table_templater.py
new file mode 100644
index 000000000..950f3b302
--- /dev/null
+++ b/scripts/simplebench/table_templater.py
@@ -0,0 +1,62 @@
+# Parser for test templates
+#
+# Copyright (c) 2021 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import itertools
+from lark import Lark
+
# Lark grammar for table templates: plain text interleaved with
# {a|b|...} column switches and [a|b|...] row switches.
# A raw string is required: the regex below contains \[ and \], which are
# invalid escape sequences in a normal string literal (SyntaxWarning in
# current CPython, scheduled to become an error).
grammar = r"""
start: ( text | column_switch | row_switch )+

column_switch: "{" text ["|" text]+ "}"
row_switch: "[" text ["|" text]+ "]"
text: /[^|{}\[\]]+/
"""

parser = Lark(grammar)
+
class Templater:
    """Expand a table template into per-cell command strings.

    The template mixes plain text with {a|b|...} column switches and
    [a|b|...] row switches.  self.columns / self.rows enumerate every
    combination of switch alternatives (in template order), and gen()
    substitutes one such combination back into the template.
    """

    def __init__(self, template):
        self.tree = parser.parse(template)

        def variants(node):
            # All alternatives of one switch, as plain strings.
            return [alt.children[0].value for alt in node.children]

        col_switches = [variants(node) for node in self.tree.children
                        if node.data == 'column_switch']
        row_switches = [variants(node) for node in self.tree.children
                        if node.data == 'row_switch']

        # Cartesian product over the switches gives one tuple per
        # column (resp. row) of the resulting table.
        self.columns = list(itertools.product(*col_switches))
        self.rows = list(itertools.product(*row_switches))

    def gen(self, column, row):
        """Render the template for one column tuple and one row tuple."""
        col_vals = iter(column)
        row_vals = iter(row)
        chunks = []

        for node in self.tree.children:
            if node.data == 'text':
                chunks.append(node.children[0].value)
            elif node.data == 'column_switch':
                chunks.append(next(col_vals))
            elif node.data == 'row_switch':
                chunks.append(next(row_vals))

        return ''.join(chunks)