]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | #!/usr/bin/env python |
2 | ||
3 | from __future__ import print_function | |
4 | ||
5 | import subprocess | |
6 | import uuid | |
7 | import re | |
8 | import json | |
9 | import sys | |
10 | import ast | |
11 | import requests | |
12 | from operator import itemgetter | |
13 | from heapq import nlargest | |
14 | ||
15 | ||
# config-key names under which the cluster UUID and the ownership/contact
# metadata are persisted in the Ceph monitors' config-key store.
CLUSTER_UUID_NAME='cluster-uuid'
CLUSTER_OWNERSHIP_NAME='cluster-ownership'

# Toggled to True by the -v/--verbose command-line flag (see main()).
verbose = False
21 | ||
# Python < 2.7 has no collections.Counter, so fall back to a local backport.
# NOTE(review): the backport uses dict.iteritems() and itertools.ifilter, so
# it is Python 2 only -- on Python 3 the stdlib import always succeeds and
# this fallback is never exercised.
try:
    from collections import Counter
except ImportError:
    from itertools import repeat, ifilter

    class Counter(dict):
        '''Dict subclass for counting hashable objects. Sometimes called a bag
        or multiset. Elements are stored as dictionary keys and their counts
        are stored as dictionary values.

        >>> Counter('zyzygy')
        Counter({'y': 3, 'z': 2, 'g': 1})

        '''

        def __init__(self, iterable=None, **kwds):
            '''Create a new, empty Counter object. And if given, count elements
            from an input iterable. Or, initialize the count from another mapping
            of elements to their counts.

            >>> c = Counter()                 # a new, empty counter
            >>> c = Counter('gallahad')       # a new counter from an iterable
            >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
            >>> c = Counter(a=4, b=2)         # a new counter from keyword args

            '''
            self.update(iterable, **kwds)

        def __missing__(self, key):
            # Unknown elements report a zero count instead of raising KeyError.
            return 0

        def most_common(self, n=None):
            '''List the n most common elements and their counts from the most
            common to the least. If n is None, then list all element counts.

            >>> Counter('abracadabra').most_common(3)
            [('a', 5), ('r', 2), ('b', 2)]

            '''
            if n is None:
                return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
            # heapq.nlargest avoids a full sort when only the top n are wanted.
            return nlargest(n, self.iteritems(), key=itemgetter(1))

        def elements(self):
            '''Iterator over elements repeating each as many times as its count.

            >>> c = Counter('ABCABC')
            >>> sorted(c.elements())
            ['A', 'A', 'B', 'B', 'C', 'C']

            If an element's count has been set to zero or is a negative number,
            elements() will ignore it.

            '''
            for elem, count in self.iteritems():
                for _ in repeat(None, count):
                    yield elem

        # Override dict methods where the meaning changes for Counter objects.

        @classmethod
        def fromkeys(cls, iterable, v=None):
            # fromkeys would assign the same count to every element, which is
            # ambiguous for a multiset, so it is deliberately unsupported.
            raise NotImplementedError(
                'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')

        def update(self, iterable=None, **kwds):
            '''Like dict.update() but add counts instead of replacing them.

            Source can be an iterable, a dictionary, or another Counter instance.

            >>> c = Counter('which')
            >>> c.update('witch')    # add elements from another iterable
            >>> d = Counter('watch')
            >>> c.update(d)          # add elements from another counter
            >>> c['h']               # four 'h' in which, witch, and watch
            4

            '''
            if iterable is not None:
                if hasattr(iterable, 'iteritems'):
                    if self:
                        self_get = self.get
                        for elem, count in iterable.iteritems():
                            self[elem] = self_get(elem, 0) + count
                    else:
                        dict.update(self, iterable)  # fast path when counter is empty
                else:
                    # Plain iterable: count each element occurrence.
                    self_get = self.get
                    for elem in iterable:
                        self[elem] = self_get(elem, 0) + 1
            if kwds:
                # Keyword arguments are treated as an element->count mapping.
                self.update(kwds)

        def copy(self):
            'Like dict.copy() but returns a Counter instance instead of a dict.'
            return Counter(self)

        def __delitem__(self, elem):
            'Like dict.__delitem__() but does not raise KeyError for missing values.'
            if elem in self:
                dict.__delitem__(self, elem)

        def __repr__(self):
            if not self:
                return '%s()' % self.__class__.__name__
            # Render elements in most-common-first order, dict-literal style.
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)

        # Multiset-style mathematical operations discussed in:
        #       Knuth TAOCP Volume II section 4.6.3 exercise 19
        #       and at http://en.wikipedia.org/wiki/Multiset
        #
        # Outputs guaranteed to only include positive counts.
        #
        # To strip negative and zero counts, add-in an empty counter:
        #       c += Counter()

        def __add__(self, other):
            '''Add counts from two counters.

            >>> Counter('abbb') + Counter('bcc')
            Counter({'b': 4, 'c': 2, 'a': 1})

            '''
            if not isinstance(other, Counter):
                return NotImplemented
            result = Counter()
            for elem in set(self) | set(other):
                newcount = self[elem] + other[elem]
                if newcount > 0:
                    result[elem] = newcount
            return result

        def __sub__(self, other):
            ''' Subtract count, but keep only results with positive counts.

            >>> Counter('abbbc') - Counter('bccd')
            Counter({'b': 2, 'a': 1})

            '''
            if not isinstance(other, Counter):
                return NotImplemented
            result = Counter()
            for elem in set(self) | set(other):
                newcount = self[elem] - other[elem]
                if newcount > 0:
                    result[elem] = newcount
            return result

        def __or__(self, other):
            '''Union is the maximum of value in either of the input counters.

            >>> Counter('abbb') | Counter('bcc')
            Counter({'b': 3, 'c': 2, 'a': 1})

            '''
            if not isinstance(other, Counter):
                return NotImplemented
            _max = max
            result = Counter()
            for elem in set(self) | set(other):
                newcount = _max(self[elem], other[elem])
                if newcount > 0:
                    result[elem] = newcount
            return result

        def __and__(self, other):
            ''' Intersection is the minimum of corresponding counts.

            >>> Counter('abbb') & Counter('bcc')
            Counter({'b': 1})

            '''
            if not isinstance(other, Counter):
                return NotImplemented
            _min = min
            result = Counter()
            # Iterate the smaller counter for fewer lookups.
            if len(self) < len(other):
                self, other = other, self
            for elem in ifilter(self.__contains__, other):
                newcount = _min(self[elem], other[elem])
                if newcount > 0:
                    result[elem] = newcount
            return result
208 | ||
def print_stderr(*args, **kwargs):
    """Print to stderr by default; an explicit file= keyword still wins."""
    if 'file' not in kwargs:
        kwargs['file'] = sys.stderr
    print(*args, **kwargs)
212 | ||
def run_command(cmd):
    """Run *cmd* (an argv list) and return (returncode, stdout, stderr).

    Both output streams are decoded as UTF-8, ignoring undecodable bytes.
    """
    if verbose:
        print_stderr("run_command: " + str(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return (proc.returncode,
            out.decode('utf-8', 'ignore'),
            err.decode('utf-8', 'ignore'))
222 | ||
223 | ||
def get_uuid():
    """Return the persistent cluster UUID, generating and storing one if absent.

    Raises RuntimeError when the freshly generated UUID cannot be stored.
    """
    rc, uid, err = run_command(['ceph', 'config-key', 'get', CLUSTER_UUID_NAME])
    if rc == 0:
        return uid

    # Key is not set yet: mint a new UUID and persist it for next time.
    uid = str(uuid.uuid4())
    rc, _, err = run_command(['ceph', 'config-key', 'put',
                              CLUSTER_UUID_NAME, uid])
    if rc:
        raise RuntimeError("'ceph config-key put' failed -" + err)
    return uid
235 | ||
def bytes_pretty_to_raw(byte_count, byte_scale):
    """Shift *byte_count* right by the power of two named by *byte_scale*.

    Unrecognized scales return *byte_count* unchanged.
    NOTE(review): the shifts divide (raw -> pretty direction) even though the
    name says pretty -> raw; confirm the intended direction with callers.
    """
    shifts = {'kB': 10, 'MB': 20, 'GB': 30, 'TB': 40, 'PB': 50, 'EB': 60}
    return byte_count >> shifts.get(byte_scale, 0)
251 | ||
def get_nums():
    """Collect cluster inventory counts from 'ceph -s' and 'ceph pg dump pools'.

    Returns a dict with keys num_mons, num_osds, num_mdss, num_pgs,
    num_data_bytes, num_bytes_total, num_pools and num_objects.
    Raises RuntimeError when either ceph command fails.
    """
    rc, out, err = run_command(['ceph', '-s', '-f', 'json'])
    if rc:
        raise RuntimeError("'ceph -s' failed - " + err)

    status = json.loads(out)
    # Pre-fsmap clusters have no 'fsmap' key at all; treat that as zero MDSs.
    try:
        num_mdss = status['fsmap']['in']
    except KeyError:
        num_mdss = 0

    pgmap = status['pgmap']

    rc, out, err = run_command(['ceph', 'pg', 'dump', 'pools', '-f', 'json-pretty'])
    if rc:
        raise RuntimeError("'ceph pg dump pools' failed - " + err)

    pools = json.loads(out)

    return {
        'num_mons': len(status['monmap']['mons']),
        'num_osds': int(status['osdmap']['osdmap']['num_in_osds']),
        'num_mdss': num_mdss,
        'num_pgs': pgmap['num_pgs'],
        'num_data_bytes': pgmap['data_bytes'],
        'num_bytes_total': pgmap['bytes_total'],
        'num_pools': len(pools),
        'num_objects': sum(p['stat_sum']['num_objects'] for p in pools),
    }
289 | ||
def get_crush_types():
    """Summarize the CRUSH map as a list of {'type': name, 'count': n} entries.

    Counts buckets per CRUSH type and appends a 'devices' entry when present.
    Raises RuntimeError when the dump fails or lacks a 'types' item.
    """
    rc, out, err = run_command(['ceph', 'osd', 'crush', 'dump'])
    if rc:
        raise RuntimeError("'ceph osd crush dump' failed - " + err)

    crush_dump = json.loads(out)
    if crush_dump['types'] is None:
        raise RuntimeError("'types' item missing in 'ceph osd crush dump'")

    # Map numeric type ids to their human-readable names.
    id_to_name = dict((t['type_id'], t['name']) for t in crush_dump['types'])

    bucket_counts = Counter(b['type_id'] for b in crush_dump['buckets'])
    crush_map = [{'type': id_to_name[type_id], 'count': count}
                 for type_id, count in bucket_counts.items()]

    if 'devices' in crush_dump:
        crush_map.append({'type': 'devices',
                          'count': len(crush_dump['devices'])})

    return crush_map
318 | ||
def get_osd_dump_info():
    """Return (cluster creation date, per-pool metadata) from 'ceph osd dump'.

    Each pool entry carries its id, type and replication size.
    Raises RuntimeError when the dump fails.
    """
    rc, out, err = run_command(['ceph', 'osd', 'dump', '-f', 'json'])
    if rc:
        raise RuntimeError("'ceph osd dump' failed - " + err)

    dump = json.loads(out)
    pool_meta = [{'id': p['pool'], 'type': p['type'], 'size': p['size']}
                 for p in dump['pools']]
    return dump['created'], pool_meta
331 | ||
def get_sysinfo(max_osds):
    """Aggregate per-OSD metadata (OS, kernel, distro, CPU, arch, version).

    Queries 'ceph osd metadata' for OSD ids 0..max_osds-1 and returns a dict
    of count tables; returns an empty dict (and warns on stderr) when no OSD
    metadata is available at all.
    """
    os_info = {}
    kern_version = {}
    kern_description = {}
    distro = {}
    cpu = {}
    arch = {}
    ceph_version = {}
    metadata_seen = False

    def bump(table, key):
        # Count one more occurrence of *key* in *table*.
        table[key] = table.get(key, 0) + 1

    for osd_id in range(max_osds):
        rc, out, err = run_command(['ceph', 'osd', 'metadata', str(osd_id)])
        if rc != 0:
            continue
        metadata_seen = True
        jmeta = json.loads(out)

        # ceph_version looks like "ceph version X.Y.Z (sha1)"; keep the
        # version token plus the following token when present.
        version = jmeta['ceph_version'].split()
        cv = version[2]
        if len(version) > 3:
            cv += version[3]
        bump(ceph_version, cv)

        bump(os_info, jmeta['os'])
        bump(kern_version, jmeta['kernel_version'])
        bump(kern_description, jmeta['kernel_description'])

        # Older OSDs may omit some distro fields; skip the entry entirely then.
        try:
            dstr = (jmeta['distro'] + ' ' +
                    jmeta['distro_version'] + ' ' +
                    jmeta['distro_codename'] + ' (' +
                    jmeta['distro_description'] + ')')
            bump(distro, dstr)
        except KeyError:
            pass

        bump(cpu, jmeta['cpu'])
        bump(arch, jmeta['arch'])

    sysinfo = {}
    if not metadata_seen:
        print_stderr("'ceph osd metadata' is not available at all")
        return sysinfo

    def jsonify(type_count, name, type_name):
        # Flatten a {value: count} table into a list of small dicts.
        sysinfo[name] = [{type_name: k, 'count': v}
                         for k, v in type_count.items()]

    jsonify(os_info, 'os_info', 'os')
    jsonify(kern_version, 'kernel_versions', 'version')
    jsonify(kern_description, 'kernel_types', 'type')
    jsonify(distro, 'distros', 'distro')
    jsonify(cpu, 'cpus', 'cpu')
    jsonify(arch, 'cpu_archs', 'arch')
    jsonify(ceph_version, 'ceph_versions', 'version')
    return sysinfo
398 | ||
def get_ownership_info():
    """Fetch the stored ownership metadata; returns {} when the key is unset."""
    rc, out, err = run_command(['ceph', 'config-key', 'get',
                                CLUSTER_OWNERSHIP_NAME])
    if rc:
        return {}
    # The value was stored via str(dict) (see update_metadata), so
    # literal_eval parses it back safely without executing code.
    return ast.literal_eval(out)
406 | ||
def output_json():
    """Assemble the full brag report.

    Returns (pretty-printed JSON text, publish URL or None). The 'url' key is
    popped out of the ownership section so it is not published itself.
    """
    report = {}
    url = None

    report['uuid'] = get_uuid()
    nums = get_nums()
    report['components_count'] = nums
    report['crush_types'] = get_crush_types()
    report['cluster_creation_date'], report['pool_metadata'] = get_osd_dump_info()
    report['sysinfo'] = get_sysinfo(int(nums['num_osds']))

    owner = get_ownership_info()
    if owner is not None:
        report['ownership'] = owner
        if 'url' in owner:
            url = owner.pop('url')

    return json.dumps(report, indent=2, separators=(',', ': ')), url
426 | ||
def describe_usage():
    """Print the command-line usage summary to stderr."""
    usage_lines = [
        "Usage:",
        "======",
        "",
        sys.argv[0] + " [-v|--verbose] [<commands> [command-options]]",
        "",
        "without any option, shows the data to be published and do nothing",
        "",
        "-v|--verbose: toggle verbose output on stdout",
        "",
        "commands:",
        "publish - publish the brag report to the server",
        "update-metadata <update-metadata-options> - Update",
        " ownership information for bragging",
        "clear-metadata - Clear information set by update-metadata",
        "unpublish --yes-i-am-shy - delete the brag report from the server",
        "",
        "update-metadata options:",
        "--name= - Name of the cluster",
        "--organization= - Name of the organization",
        "--email= - Email contact address",
        "--description= - Reporting use-case",
        "--url= - The URL that is used to publish and unpublish",
        "",
    ]
    for line in usage_lines:
        print_stderr(line)
452 | ||
def update_metadata():
    """Merge --key=value command-line options into the stored ownership info.

    Recognized keys: name, organization, email, description, url. Existing
    values are preserved for keys not mentioned on the command line.
    Returns the 'ceph config-key put' exit status, or 22 (EINVAL) when an
    argument is malformed or uses an unknown key.
    """
    possibles = ['name', 'organization', 'email', 'description', 'url']

    # Start from the existing values so unspecified keys are preserved.
    info = get_ownership_info()

    for arg in sys.argv[2:]:
        # Raw string so '\S' is a regex escape, not an (invalid) string escape.
        mo = re.search(r"--(\S+)=(.*)", arg)
        if not mo:
            describe_usage()
            return 22

        key = mo.group(1)
        value = mo.group(2)

        if key not in possibles:
            # Typo fix: was "Unexpect option".
            print_stderr("Unexpected option --" + key)
            describe_usage()
            return 22
        info[key] = value

    (rc, o, e) = run_command(['ceph', 'config-key', 'put',
                              CLUSTER_OWNERSHIP_NAME, str(info)])
    return rc
479 | ||
def clear_metadata():
    """Delete the stored ownership info; returns the command's exit status."""
    status, _, _ = run_command(['ceph', 'config-key', 'del',
                                CLUSTER_OWNERSHIP_NAME])
    return status
484 | ||
def publish():
    """PUT the brag report to the configured URL; returns 0 on success, 1 on failure."""
    data, url = output_json()
    if url is None:
        print_stderr("Cannot publish until a URL is set using update-metadata")
        return 1

    if verbose:
        print_stderr("PUT " + str(url) + " : " + str(data))

    response = requests.put(url, data=data)
    if response.status_code == 201:
        return 0

    # Anything other than 201 Created is treated as a failure.
    print_stderr("Failed to publish, server responded with code " +
                 str(response.status_code))
    print_stderr(response.text)
    return 1
500 | ||
def unpublish():
    """DELETE this cluster's brag report from the server.

    Requires the explicit '--yes-i-am-shy' confirmation argument.
    Returns 0 on success, 22 (EINVAL) on a missing confirmation, 1 otherwise.
    """
    if len(sys.argv) <= 2 or sys.argv[2] != '--yes-i-am-shy':
        print_stderr("unpublish should be followed by --yes-i-am-shy")
        return 22

    owner = get_ownership_info()
    # get_ownership_info() returns {} when unset (never None), so checking
    # for the key directly replaces the original unreachable None test.
    if not owner or 'url' not in owner:
        print_stderr("URL is not updated yet")
        return 1
    url = owner['url']

    # Named cluster_uuid so the local does not shadow the 'uuid' module.
    cluster_uuid = get_uuid()

    req = requests.delete(url, params={'uuid': cluster_uuid})
    if req.status_code != 200:
        # Typo fix: was "responsed".
        print_stderr("Failed to unpublish, server responded with code " +
                     str(req.status_code))
        return 1

    return 0
528 | ||
def main():
    """Command-line entry point; returns the process exit status."""
    # An optional leading -v/--verbose flag is consumed before the command.
    if len(sys.argv) > 1 and sys.argv[1] in ('-v', '--verbose'):
        global verbose
        verbose = True
        sys.argv.pop(1)

    if len(sys.argv) == 1:
        # No command: just show the report that would be published.
        print(output_json()[0])
        return 0

    commands = {
        'update-metadata': update_metadata,
        'clear-metadata': clear_metadata,
        'publish': publish,
        'unpublish': unpublish,
    }
    handler = commands.get(sys.argv[1])
    if handler is None:
        describe_usage()
        return 22
    return handler()
548 | ||
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())