#!/usr/bin/env python3
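"""Run a benchmark from the s64da-benchmark-toolkit against a PostgreSQL DSN.

Part of a fork of swarm64/s64da-benchmark-toolkit. Example invocations (the
DSN, benchmark name, and option values are illustrative):

    ./run_benchmark --dsn postgresql://postgres@localhost/dbname tpcds \
        --scale-factor 100 --check-correctness
    ./run_benchmark --dsn postgresql://postgres@localhost/dbname htap
"""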
import argparse
import logging
import os
import sys

from logging.config import fileConfig

from benchmarks import htap
from checks import check_program_exists
from s64da_benchmark_toolkit.streams import Streams, Benchmark

fileConfig('logging.ini')
logger = logging.getLogger()


def check_requirements():
    py_version_info = sys.version_info
    if py_version_info < (3, 10):
        logger.error('Python 3.10 or newer is required to run the benchmarks.')
        sys.exit(1)

    check_program_exists('psql')
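

# `check_program_exists` is imported from the local `checks` module. A minimal
# sketch of what it is assumed to do (illustrative, not the actual
# implementation):
#
#     import shutil
#
#     def check_program_exists(program):
#         if shutil.which(program) is None:
#             logger.error(f'Required program "{program}" not found in PATH.')
#             sys.exit(1)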


def find_benchmarks():
    benchmarks = []
    benchmarks_dir = 'benchmarks'
    try:
        for directory in os.listdir(benchmarks_dir):
            base_dir = os.path.join(benchmarks_dir, directory)
            benchmarks.append(Benchmark(name=directory, base_dir=base_dir))
    except FileNotFoundError:
        pass

    return benchmarks
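

# Expected on-disk layout (illustrative): every subdirectory of `benchmarks/`
# is registered as one benchmark, e.g.
#
#     benchmarks/
#         htap/
#         tpcds/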


def add_streams_parsers(subparsers, streams_benchmarks):
    for benchmark in streams_benchmarks:
        streams_parser = subparsers.add_parser(benchmark.name)
        streams_parser.add_argument('--config', required=False, default=None, help=(
            'Optional YAML override configuration file to be applied before running. '
            'Supply a full path to the file.'
        ))
        streams_parser.add_argument('--timeout', default=None, help=(
            'Statement timeout to be used. If there is a timeout in the override '
            'configuration, that one takes precedence.'
        ))
        streams_parser.add_argument('--streams', type=int, default=0, help=(
            'How many streams (virtual users) to run in parallel for the selected '
            'benchmark. Pass "0" to run a single stream. The default is "0".'
        ))
        streams_parser.add_argument('--stream-offset', type=int, default=1, help=(
            'Which stream to start with when running multiple streams. Defaults to "1".'
        ))
        streams_parser.add_argument('--netdata-output-file', default=None, help=(
            'File to write netdata stats into. Requires the "netdata" key to be '
            'present in the configuration.'
        ))
        streams_parser.add_argument('--output', choices=['csv', 'print'], default='print',
                                    nargs='+', help=(
            'How the results output should look. Multiple options are possible, '
            'separated by spaces.'
        ))
        streams_parser.add_argument('--csv-file', default='results.csv', help=(
            'Where to save the CSV file if CSV output is selected. '
            'The default is results.csv in the current directory.'
        ))
        streams_parser.add_argument('--check-correctness', action='store_true', default=False,
                                    help=(
            'Flag to check correctness. Additionally, each query output will be '
            'stored in the query_results folder.'
        ))

        # FIXME: for some of these no correctness results exist
        scale_factors = (10, 100, 300, 1000, 3000, 5000, 8000)
        scale_factor_required = any(arg in ['--check-correctness', 'tpcds'] for arg in sys.argv)
        streams_parser.add_argument('--scale-factor', choices=scale_factors, type=int,
                                    default=None, required=scale_factor_required, help=(
            'Scale factor of the stored correctness results to compare the query output against.'
        ))
        streams_parser.add_argument('--explain-analyze', action='store_true', default=False,
                                    help=(
            'Whether to run EXPLAIN ANALYZE. Will save plans into the "plan" directory.'
        ))
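

# Note that `--scale-factor` is marked required by scanning the raw sys.argv,
# so the requirement applies whenever `--check-correctness` or `tpcds` appears
# anywhere on the command line. An example of a resulting invocation (assuming
# a `tpcds` benchmark directory exists; the DSN is illustrative):
#
#     ./run_benchmark --dsn postgresql://postgres@localhost/dbname \
#         tpcds --streams 4 --check-correctness --scale-factor 100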


def parse_arguments(argv, benchmarks):
    common_parser = argparse.ArgumentParser(
        argument_default=argparse.SUPPRESS,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    common_parser.add_argument('--dsn', required=True, help=(
        'The PostgreSQL DSN to connect to, including the database name, '
        'e.g. postgresql://postgres@localhost/dbname'
    ))
    common_parser.add_argument('--use-server-side-cursors', default=False, action='store_true',
                               required=False,
                               help='Use server-side cursors for executing the queries.')
    common_parser.add_argument('--umbra', default=False, action='store_true',
                               required=False, help='Run in Umbra compatibility mode.')

    subparsers = common_parser.add_subparsers(dest='benchmark')
    add_streams_parsers(subparsers, benchmarks)
    htap.add_parser(subparsers)

    return common_parser.parse_args(argv)
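

# For illustration (assuming a `tpcds` benchmark directory exists):
# parse_arguments(['--dsn', '<dsn>', 'tpcds', '--streams', '2'], benchmarks)
# returns a namespace with dsn='<dsn>', benchmark='tpcds', streams=2, and the
# explicit defaults of the remaining subparser options.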


if __name__ == '__main__':
    check_requirements()

    # Remove the deprecated `--benchmark` parameter, since benchmarks are now
    # subparsers, and warn if it was present.
    benchmark_param_found = False
    argv = []
    for item in sys.argv:
        if item.startswith('--benchmark='):
            benchmark_param_found = True
            argv.append(item.replace('--benchmark=', ''))
        elif item == '--benchmark':
            benchmark_param_found = True
            continue
        else:
            argv.append(item)

    if benchmark_param_found:
        logger.warning('The parameter `--benchmark` is deprecated. Provide the name of the '
                       'benchmark you want to run as a positional argument after the `--dsn` '
                       'parameter common to all benchmarks.')
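
    # For illustration: ['./run_benchmark', '--dsn', '<dsn>', '--benchmark=tpcds']
    # has been rewritten to ['./run_benchmark', '--dsn', '<dsn>', 'tpcds'] at
    # this point, before argument parsing.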
    benchmarks = [b for b in find_benchmarks() if b.name != 'htap']
    args = parse_arguments(argv[1:], benchmarks)

    if args.benchmark == 'htap':
        htap.run(args)
    else:
        for benchmark in benchmarks:
            if benchmark.name == args.benchmark:
                benchmark_to_run = benchmark
                break

        Streams(args, benchmark_to_run).run()