]>
git.proxmox.com Git - ceph.git/blob - ceph/src/boost/libs/metaparse/tools/benchmark/benchmark.py
2 """Utility to benchmark the generated source files"""
4 # Copyright Abel Sinkovics (abel@sinkovics.hu) 2016.
5 # Distributed under the Boost Software License, Version 1.0.
6 # (See accompanying file LICENSE_1_0.txt or copy at
7 # http://www.boost.org/LICENSE_1_0.txt)
23 import matplotlib
.pyplot
# pylint:disable=I0011,C0411,C0412,C0413
26 def benchmark_command(cmd
, progress
):
27 """Benchmark one command execution"""
28 full_cmd
= '/usr/bin/time --format="%U %M" {0}'.format(cmd
)
29 print '{0:6.2f}% Running {1}'.format(100.0 * progress
, full_cmd
)
30 (_
, err
) = subprocess
.Popen(
31 ['/bin/sh', '-c', full_cmd
],
32 stdin
=subprocess
.PIPE
,
33 stdout
=subprocess
.PIPE
,
34 stderr
=subprocess
.PIPE
37 values
= err
.strip().split(' ')
40 return (float(values
[0]), float(values
[1]))
41 except: # pylint:disable=I0011,W0702
42 pass # Handled by the code after the "if"
45 raise Exception('Error during benchmarking')
49 filename
, compiler
, include_dirs
, (progress_from
, progress_to
),
50 iter_count
, extra_flags
= ''):
51 """Benchmark one file"""
54 for nth_run
in xrange(0, iter_count
):
55 (time_spent
, mem_used
) = benchmark_command(
56 '{0} -std=c++11 {1} -c {2} {3}'.format(
58 ' '.join('-I{0}'.format(i
) for i
in include_dirs
),
63 progress_to
* nth_run
+ progress_from
* (iter_count
- nth_run
)
66 os
.remove(os
.path
.splitext(os
.path
.basename(filename
))[0] + '.o')
67 time_sum
= time_sum
+ time_spent
68 mem_sum
= mem_sum
+ mem_used
71 "time": time_sum
/ iter_count
,
72 "memory": mem_sum
/ (iter_count
* 1024)
76 def compiler_info(compiler
):
77 """Determine the name + version of the compiler"""
78 (out
, err
) = subprocess
.Popen(
79 ['/bin/sh', '-c', '{0} -v'.format(compiler
)],
80 stdin
=subprocess
.PIPE
,
81 stdout
=subprocess
.PIPE
,
82 stderr
=subprocess
.PIPE
85 gcc_clang
= re
.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
87 for line
in (out
+ err
).split('\n'):
88 mtch
= gcc_clang
.search(line
)
90 return mtch
.group(1) + ' ' + mtch
.group(2)
def string_char(char):
    """Turn the character into one that can be part of a filename.

    Characters that are unsafe in filenames (spaces, tildes, parentheses
    and path separators) are replaced with an underscore; everything else
    passes through unchanged.
    """
    forbidden = (' ', '~', '(', ')', '/', '\\')
    if char in forbidden:
        return '_'
    return char
100 def make_filename(string
):
101 """Turn the string into a filename"""
102 return ''.join(string_char(c
) for c
in string
)
def files_in_dir(path, extension):
    """Enumerate the files in path with the given extension.

    extension is given without the leading dot; the result is a lazy
    generator over the matching filenames (not full paths).
    """
    suffix = '.' + extension
    matches = (entry for entry in os.listdir(path) if entry.endswith(suffix))
    return matches
111 def format_time(seconds
):
112 """Format a duration"""
120 ('week', week
), ('day', day
), ('hour', hour
),
121 ('minute', minute
), ('second', 1)
124 value
= seconds
// dur
126 '{0} {1}{2}'.format(int(value
), name
, 's' if value
> 1 else '')
128 seconds
= seconds
% dur
129 return ' '.join(result
)
132 def benchmark(src_dir
, compiler
, include_dirs
, iter_count
):
133 """Do the benchmarking"""
135 files
= list(files_in_dir(src_dir
, 'cpp'))
136 random
.shuffle(files
)
137 has_string_templates
= True
138 string_template_file_cnt
= sum(1 for file in files
if 'bmp' in file)
139 file_count
= len(files
) + string_template_file_cnt
141 started_at
= time
.time()
143 for filename
in files
:
144 progress
= len(result
)
145 result
[filename
] = benchmark_file(
146 os
.path
.join(src_dir
, filename
),
149 (float(progress
) / file_count
, float(progress
+ 1) / file_count
),
152 if 'bmp' in filename
and has_string_templates
:
154 temp_result
= benchmark_file(
155 os
.path
.join(src_dir
, filename
),
158 (float(progress
+ 1) / file_count
, float(progress
+ 2) / file_count
),
160 '-Xclang -fstring-literal-templates'
162 result
[filename
.replace('bmp', 'slt')] = temp_result
164 has_string_templates
= False
165 file_count
-= string_template_file_cnt
166 print 'Stopping the benchmarking of string literal templates'
168 elapsed
= time
.time() - started_at
169 total
= float(file_count
* elapsed
) / len(result
)
170 print 'Elapsed time: {0}, Remaining time: {1}'.format(
171 format_time(elapsed
),
172 format_time(total
- elapsed
)
177 def plot(values
, mode_names
, title
, (xlabel
, ylabel
), out_file
):
179 matplotlib
.pyplot
.clf()
180 for mode
, mode_name
in mode_names
.iteritems():
182 matplotlib
.pyplot
.plot(
183 [x
for x
, _
in vals
],
184 [y
for _
, y
in vals
],
187 matplotlib
.pyplot
.title(title
)
188 matplotlib
.pyplot
.xlabel(xlabel
)
189 matplotlib
.pyplot
.ylabel(ylabel
)
190 if len(mode_names
) > 1:
191 matplotlib
.pyplot
.legend()
192 matplotlib
.pyplot
.savefig(out_file
)
def configs_in(src_dir):
    """Enumerate all configs in src_dir.

    Each *.json file found in src_dir is parsed and the resulting
    object is yielded one at a time.
    """
    for config_file in files_in_dir(src_dir, 'json'):
        full_path = os.path.join(src_dir, config_file)
        with open(full_path, 'rb') as in_f:
            yield json.load(in_f)
def byte_to_gb(byte):
    """Convert bytes to GB (floating-point, binary gigabytes)."""
    bytes_per_gb = 1024.0 ** 3
    return byte / bytes_per_gb
215 def join_images(img_files
, out_file
):
216 """Join the list of images into the out file"""
217 images
= [PIL
.Image
.open(f
) for f
in img_files
]
218 joined
= PIL
.Image
.new(
220 (sum(i
.size
[0] for i
in images
), max(i
.size
[1] for i
in images
))
224 joined
.paste(im
=img
, box
=(left
, 0))
225 left
= left
+ img
.size
[0]
226 joined
.save(out_file
)
229 def plot_temp_diagrams(config
, results
, temp_dir
):
230 """Plot temporary diagrams"""
232 'time': 'Compilation time (s)',
233 'memory': 'Compiler memory usage (MB)',
236 files
= config
['files']
239 if any('slt' in result
for result
in results
) and 'bmp' in files
.values()[0]:
240 config
['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates'
241 for f
in files
.values():
242 f
['slt'] = f
['bmp'].replace('bmp', 'slt')
244 for measured
in ['time', 'memory']:
245 mpts
= sorted(int(k
) for k
in files
.keys())
246 img_files
.append(os
.path
.join(temp_dir
, '_{0}.png'.format(measured
)))
249 m
: [(x
, results
[files
[str(x
)][m
]][measured
]) for x
in mpts
]
250 for m
in config
['modes'].keys()
253 display_name
[measured
],
254 (config
['x_axis_label'], display_name
[measured
]),
260 def plot_diagram(config
, results
, images_dir
, out_filename
):
261 """Plot one diagram"""
262 img_files
= plot_temp_diagrams(config
, results
, images_dir
)
263 join_images(img_files
, out_filename
)
264 for img_file
in img_files
:
268 def plot_diagrams(results
, configs
, compiler
, out_dir
):
269 """Plot all diagrams specified by the configs"""
270 compiler_fn
= make_filename(compiler
)
271 total
= psutil
.virtual_memory().total
# pylint:disable=I0011,E1101
272 memory
= int(math
.ceil(byte_to_gb(total
)))
274 images_dir
= os
.path
.join(out_dir
, 'images')
276 for config
in configs
:
277 out_prefix
= '{0}_{1}'.format(config
['name'], compiler_fn
)
283 os
.path
.join(images_dir
, '{0}.png'.format(out_prefix
))
287 os
.path
.join(out_dir
, '{0}.qbk'.format(out_prefix
)),
291 Measured on a {2} host with {3} GB memory. Compiler used: {4}.
293 [$images/metaparse/{1}.png [width 100%]]
294 """.format(config
['desc'], out_prefix
, platform
.platform(), memory
, compiler
)
295 out_f
.write(qbk_content
)
299 """The main function of the script"""
300 desc
= 'Benchmark the files generated by generate.py'
301 parser
= argparse
.ArgumentParser(description
=desc
)
306 help='The directory containing the sources to benchmark'
312 help='The output directory'
318 help='The directory containing the headeres for the benchmark'
322 dest
='boost_headers',
323 default
='../../../..',
324 help='The directory containing the Boost headers (the boost directory)'
330 help='The compiler to do the benchmark with'
337 help='How many times a measurement should be repeated.'
340 args
= parser
.parse_args()
342 compiler
= compiler_info(args
.compiler
)
346 [args
.include
, args
.boost_headers
],
350 plot_diagrams(results
, configs_in(args
.src_dir
), compiler
, args
.out_dir
)
353 if __name__
== '__main__':