# Source: ceph/src/test/ceph_objectstore_tool.py
# (git.proxmox.com mirror of ceph.git, sources updated to v12.1.2)
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4 from subprocess import call
try:
    from subprocess import check_output
except ImportError:
    # Python < 2.7 has no subprocess.check_output; provide a compatible shim.
    def check_output(*popenargs, **kwargs):
        """Run a command and return its stdout; raise CalledProcessError on
        a non-zero exit status (backport of the 2.7 stdlib function)."""
        import subprocess
        # backported from python 2.7 stdlib
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
23
24 import filecmp
25 import os
26 import subprocess
27 import math
28 import time
29 import sys
30 import re
31 import logging
32 import json
33 import tempfile
34 import platform
35
try:
    from subprocess import DEVNULL
except ImportError:
    # Python < 3.3 has no subprocess.DEVNULL; emulate it with os.devnull.
    DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
42
43
if sys.version_info[0] >= 3:
    def decode(s):
        """Decode subprocess output (bytes) to str on Python 3."""
        return s.decode('utf-8')

    # Shadow check_output so callers always receive str, not bytes.
    def check_output(*args, **kwargs):
        return decode(subprocess.check_output(*args, **kwargs))
else:
    def decode(s):
        """On Python 2 subprocess output is already str; pass through."""
        return s
54
55
def wait_for_health():
    # Poll `ceph health` once a second until it reports HEALTH_OK or
    # HEALTH_WARN; give up after 150 attempts.
    print("Wait for health_ok...", end="")
    check = "{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN)
    attempts = 0
    while call(check, shell=True) == 0:
        attempts += 1
        if attempts == 150:
            raise Exception("Time exceeded to go to health")
        time.sleep(1)
    print("DONE")
65
66
def get_pool_id(name, nullfd):
    # `ceph osd pool stats <pool>` prints "pool <name> id <id> ...";
    # the pool id is the fourth whitespace-separated field.
    args = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
    stats = check_output(args, stderr=nullfd)
    return stats.split()[3]
71
72
73 # return a list of unique PGS given an osd subdirectory
# return a list of unique PGS given an osd subdirectory
def get_osd_pgs(SUBDIR, ID):
    # PG directories live under <SUBDIR>/current and are named "<pgid>_head";
    # when a pool ID is given, keep only PGs whose name starts with that ID.
    PGS = []
    if ID:
        endhead = re.compile("{id}.*_head$".format(id=ID))
    current = os.path.join(SUBDIR, "current")
    candidates = (f for f in os.listdir(current)
                  if os.path.isdir(os.path.join(current, f)))
    for name in candidates:
        if ID is None or endhead.match(name):
            PGS.append(name)
    # strip the "_head" suffix to yield bare pg ids
    return [re.sub("_head", "", p) for p in PGS if "_head" in p]
82
83
84 # return a sorted list of unique PGs given a directory
# return a sorted list of unique PGs given a directory
def get_pgs(DIR, ID):
    # Aggregate the PGs of every "osd*" subdirectory of DIR and dedupe.
    all_pgs = []
    for entry in os.listdir(DIR):
        if not entry.startswith("osd"):
            continue
        subdir = os.path.join(DIR, entry)
        if not os.path.isdir(subdir):
            continue
        all_pgs.extend(get_osd_pgs(subdir, ID))
    return sorted(set(all_pgs))
92
93
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
# return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
def get_objs(ALLPGS, prefix, DIR, ID):
    # NOTE: the ID parameter is accepted but not used by this implementation.
    found = set()
    osd_dirs = [f for f in os.listdir(DIR)
                if os.path.isdir(os.path.join(DIR, f)) and f.startswith("osd")]
    for osd in osd_dirs:
        current = os.path.join(DIR, osd, "current")
        for pg in ALLPGS:
            pg_head = os.path.join(current, pg + "_head")
            if not os.path.isdir(pg_head):
                continue
            # Scan every file anywhere under the pg directory for the prefix.
            for _, _, files in os.walk(pg_head):
                if any(f.startswith(prefix) for f in files):
                    found.add(pg)
                    break
    return sorted(found)
110
111
112 # return a sorted list of OSDS which have data from a given PG
# return a sorted list of OSDS which have data from a given PG
def get_osds(PG, DIR):
    pg_head = PG + "_head"
    result = []
    for entry in os.listdir(DIR):
        if not entry.startswith("osd"):
            continue
        if not os.path.isdir(os.path.join(DIR, entry)):
            continue
        # an OSD holds the PG iff <osd>/current/<pg>_head exists
        if os.path.isdir(os.path.join(DIR, entry, "current", pg_head)):
            result.append(entry)
    return sorted(result)
124
125
def get_lines(filename):
    """Read lines from *filename*, delete the file, and return the lines.

    Lines are returned without their trailing newline.  Reading stops at
    the first empty line (or EOF) — matching the original behavior, since
    callers read command output that contains no interior blank lines.
    """
    lines = []
    # `with` guarantees the handle is closed even if readline raises,
    # which the original open/close pair did not.
    with open(filename, "r") as fd:
        while True:
            line = fd.readline().rstrip('\n')
            if not line:
                break
            lines.append(line)
    os.unlink(filename)
    return lines
137
138
def cat_file(level, filename):
    # Dump a file's contents to stdout, but only when `level` is at or
    # above the root logger's effective level; stops at the first empty line.
    if level < logging.getLogger().getEffectiveLevel():
        return
    print("File: " + filename)
    with open(filename, "r") as fd:
        for line in iter(lambda: fd.readline().rstrip('\n'), ''):
            print(line)
    print("<EOF>")
150
151
def vstart(new, opt=""):
    # (Re)start a small vstart cluster; `new` selects -n (fresh cluster)
    # versus -N (reuse existing data).
    print("vstarting....", end="")
    flag = "-n" if new else "-N"
    cmd = "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path}/src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1".format(new=flag, opt=opt, path=CEPH_ROOT)
    call(cmd, shell=True)
    print("DONE")
157
158
def test_failure(cmd, errmsg, tty=False):
    """Run `cmd`, expecting it to fail with `errmsg` on stderr.

    Returns 0 when the command failed and stderr contained `errmsg`
    (or when tty mode is requested but no tty is available, in which
    case the test is skipped), 1 otherwise.  When `tty` is set the
    command's stdin/stdout are attached to /dev/tty.
    """
    if tty:
        try:
            # "r+b" opens read/write binary without truncating.  The old
            # mode "rwb" is invalid on Python 3 (ValueError), which made
            # every tty test silently skip itself.
            ttyfd = open("/dev/tty", "r+b")
        except Exception as e:
            logging.info(str(e))
            logging.info("SKIP " + cmd)
            return 0
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    tmpfd = open(TMPFILE, "wb")

    logging.debug(cmd)
    if tty:
        ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
        ttyfd.close()
    else:
        ret = call(cmd, shell=True, stderr=tmpfd)
    tmpfd.close()
    if ret == 0:
        logging.error(cmd)
        logging.error("Should have failed, but got exit 0")
        return 1
    lines = get_lines(TMPFILE)
    matched = [l for l in lines if errmsg in l]
    if any(matched):
        logging.info("Correctly failed with message \"" + matched[0] + "\"")
        return 0
    else:
        logging.error("Command: " + cmd)
        logging.error("Bad messages to stderr \"" + str(lines) + "\"")
        logging.error("Expected \"" + errmsg + "\"")
        return 1
191
192
def get_nspace(num):
    # Namespace name for index `num`; index 0 maps to the default
    # (empty) namespace.
    return "" if num == 0 else "ns{num}".format(num=num)
197
198
def verify(DATADIR, POOL, NAME_PREFIX, db):
    """Compare live cluster contents of POOL against the local reference data.

    For every head object file in DATADIR whose name matches NAME_PREFIX,
    fetch the object with `rados get` and diff it, then check each xattr,
    the omap header, and each omap key/value recorded in `db`.
    Returns the number of mismatches found.
    """
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    ERRORS = 0
    # DATADIR filenames look like "<nspace>-<name>__<clone>"
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1]
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # Fetch the object's data and compare byte-for-byte with the reference.
        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
        logging.debug(cmd)
        call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
        cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("{file} data not imported properly".format(file=file))
            ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # Verify every recorded xattr round-trips through rados getxattr.
        for key, val in db[nspace][file]["xattr"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            getval = check_output(cmd, shell=True, stderr=DEVNULL)
            logging.debug("getxattr {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
                ERRORS += 1
                continue
        # Verify the omap header (absent entries compare against "").
        hdr = db[nspace][file].get("omapheader", "")
        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stderr=DEVNULL)
        if ret != 0:
            logging.error("rados getomapheader returned {ret}".format(ret=ret))
            ERRORS += 1
        else:
            getlines = get_lines(TMPFILE)
            # getomapheader output is at most one line
            assert(len(getlines) == 0 or len(getlines) == 1)
            if len(getlines) == 0:
                gethdr = ""
            else:
                gethdr = getlines[0]
            logging.debug("header: {hdr}".format(hdr=gethdr))
            if gethdr != hdr:
                logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                ERRORS += 1
        # Verify every recorded omap key/value pair.
        for key, val in db[nspace][file]["omap"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=DEVNULL)
            if ret != 0:
                logging.error("getomapval returned {ret}".format(ret=ret))
                ERRORS += 1
                continue
            getlines = get_lines(TMPFILE)
            if len(getlines) != 1:
                logging.error("Bad data from getomapval {lines}".format(lines=getlines))
                ERRORS += 1
                continue
            getval = getlines[0]
            logging.debug("getomapval {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
                ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
    return ERRORS
278
279
def check_journal(jsondict):
    # Validate the top-level structure of a dump-journal JSON document;
    # returns the number of problems found.
    problems = 0
    if 'header' not in jsondict:
        logging.error("Key 'header' not in dump-journal")
        problems += 1
    elif 'max_size' not in jsondict['header']:
        logging.error("Key 'max_size' not in dump-journal header")
        problems += 1
    else:
        print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))
    if 'entries' not in jsondict:
        logging.error("Key 'entries' not in dump-journal output")
        problems += 1
    else:
        entries = jsondict['entries']
        if len(entries) == 0:
            logging.info("No entries in journal found")
        else:
            problems += check_journal_entries(entries)
    return problems
298
299
def check_journal_entries(entries):
    # Validate each journal entry dict; returns the number of problems found.
    problems = 0
    for enum, entry in enumerate(entries):
        if 'offset' not in entry:
            logging.error("No 'offset' key in entry {e}".format(e=enum))
            problems += 1
        if 'seq' not in entry:
            logging.error("No 'seq' key in entry {e}".format(e=enum))
            problems += 1
        if 'transactions' not in entry:
            logging.error("No 'transactions' key in entry {e}".format(e=enum))
            problems += 1
        elif len(entry['transactions']) == 0:
            logging.error("No transactions found in entry {e}".format(e=enum))
            problems += 1
        else:
            problems += check_entry_transactions(entry, enum)
    return problems
318
319
def check_entry_transactions(entry, enum):
    # Validate the transactions of one journal entry; returns the number
    # of problems found.  Each transaction must carry a 'trans_num'
    # matching its position and an 'ops' list.
    problems = 0
    for tnum, txn in enumerate(entry['transactions']):
        if 'trans_num' not in txn:
            logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            problems += 1
        elif txn['trans_num'] != tnum:
            ft = txn['trans_num']
            logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
            problems += 1
        if 'ops' not in txn:
            logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            problems += 1
        else:
            problems += check_transaction_ops(txn['ops'], enum, tnum)
    return problems
336
337
def check_transaction_ops(ops, enum, tnum):
    """Validate the ops list of one transaction; return the number of errors.

    Each op must carry an 'op_num' matching its position and an 'op_name'.
    An empty ops list is only a warning, not an error.
    """
    if not ops:  # was "len(ops) is 0": `is` on an int literal is unreliable
        logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
    errors = 0
    for onum, op in enumerate(ops):
        if 'op_num' not in op:
            logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
        elif op['op_num'] != onum:
            fo = op['op_num']
            logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
            errors += 1
        if 'op_name' not in op:
            logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
    return errors
354
355
def test_dump_journal(CFSD_PREFIX, osds):
    """Run '--op dump-journal --format json' on each osd and sanity-check
    the resulting JSON with check_journal(); return the error count."""
    ERRORS = 0
    pid = os.getpid()
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)

    for osd in osds:
        # Test --op dump-journal by loading json
        cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
        logging.debug(cmd)
        # `with` closes the temp file even on the error path (the original
        # leaked the handle when `continue` was taken before close()).
        with open(TMPFILE, "wb") as tmpfd:
            ret = call(cmd, shell=True, stdout=tmpfd)
        if ret != 0:
            logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
            ERRORS += 1
            continue
        with open(TMPFILE, "r") as tmpfd:
            jsondict = json.load(tmpfd)
        os.unlink(TMPFILE)

        journal_errors = check_journal(jsondict)
        if journal_errors != 0:  # was "is not 0": identity test on an int
            logging.error(jsondict)
        ERRORS += journal_errors

    return ERRORS
383
# Locate the ceph build tree.  Paths come from the environment when the
# test is run from a build harness; otherwise fall back to the current
# working directory and export the derived values for child processes.
CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
CEPH_BIN = os.environ.get('CEPH_BIN')
CEPH_ROOT = os.environ.get('CEPH_ROOT')

if not CEPH_BUILD_DIR:
    CEPH_BUILD_DIR=os.getcwd()
    os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
    CEPH_BIN=CEPH_BUILD_DIR
    os.putenv('CEPH_BIN', CEPH_BIN)
    CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR)
    os.putenv('CEPH_ROOT', CEPH_ROOT)
    CEPH_LIB=os.path.join(CEPH_BIN, '.libs')
    os.putenv('CEPH_LIB', CEPH_LIB)

# Scratch cluster directory and its conf file, used by vstart/kill_daemons.
CEPH_DIR = CEPH_BUILD_DIR + "/cot_dir"
CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
400
def kill_daemons():
    # Stop every cluster daemon started for this test's ceph.conf.
    cmd = "{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN)
    call(cmd, shell=True)
403
404
def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
    """Diff every on-disk replica of each head object against its reference file.

    Finds object files under OSDDIR by name and compares each with the
    corresponding reference file in DATADIR.  Returns a tuple
    (ERRORS, repcount): the number of mismatches and the number of
    replicas compared.  Returns bare 1 on an internal `find` failure.
    """
    repcount = 0
    ERRORS = 0
    # DATADIR filenames look like "<nspace>-<name>__<clone>"
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1] + "__" + clone
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        tmpfd = open(TMPFILE, "wb")
        # Locate every replica of this object in the osd data dirs.
        cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret:
            logging.critical("INTERNAL ERROR")
            return 1
        tmpfd.close()
        obj_locs = get_lines(TMPFILE)
        if len(obj_locs) == 0:
            logging.error("Can't find imported object {name}".format(name=file))
            ERRORS += 1
        for obj_loc in obj_locs:
            # For btrfs skip snap_* dirs
            if re.search("/snap_[0-9]*/", obj_loc) is not None:
                continue
            repcount += 1
            cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
            logging.debug(cmd)
            ret = call(cmd, shell=True)
            if ret != 0:
                logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
                ERRORS += 1
    return ERRORS, repcount
441
442
def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
    """Rewrite the crush weight of each osd in `osd_ids` to `weight` inside the
    newest osdmap stored at `osd_path`, using get-osdmap/set-osdmap.

    Returns True when the final set-osdmap succeeded, False otherwise.
    """
    # Extract the newest osdmap from the store; the tool prints the epoch as "#<n>".
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    output = check_output(cmd, shell=True)
    epoch = int(re.findall('#(\d+)', output)[0])

    new_crush_file = tempfile.NamedTemporaryFile(delete=True)
    old_crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=old_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Reweight each osd in turn, ping-ponging between the two temp crush
    # files so each crushtool run reads the previous run's output.
    for osd_id in osd_ids:
        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                              crush_file=old_crush_file.name,
                                                                                                              weight=weight,
                                                                                                              new_crush_file=new_crush_file.name, path=CEPH_BIN)
        ret = call(cmd, stdout=DEVNULL, shell=True)
        assert(ret == 0)
        old_crush_file, new_crush_file = new_crush_file, old_crush_file

    # swap them back, since we don't need to prepare for another round
    old_crush_file, new_crush_file = new_crush_file, old_crush_file
    old_crush_file.close()

    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=new_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Minimum test of --dry-run by using it, but not checking anything
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    assert(ret == 0)

    # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
    # to use a different epoch than the one in osdmap
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)

    return ret == 0
493
def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
    """Read back the crush weights of `osd_ids` from the store at `osd_path`.

    Returns a list of float weights (one per osd, from `crushtool --tree`),
    or None if the osdmap could not be extracted.
    """
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    if ret != 0:
        return None
    # we have to read the weights from the crush map, even though we can query
    # the weights using osdmaptool; keep in mind they are different:
    # item weights in crush map versus weight associated with each osd in osdmap
    crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               shell=True)
    assert(ret == 0)
    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
                                                                                              num_osd=len(osd_ids), path=CEPH_BIN),
                          stderr=DEVNULL,
                          shell=True)
    weights = []
    for line in output.strip().split('\n'):
        print(line)
        # raw string: '\s' is an invalid escape in a plain string literal
        linev = re.split(r'\s+', line)
        # was "linev[0] is ''": identity comparison with a str literal is
        # implementation-defined; compare by value instead
        if linev[0] == '':
            linev.pop(0)
        print('linev %s' % linev)
        weights.append(float(linev[1]))

    return weights
524
525
def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
    # Round-trip test: rewrite crush weights via set-osdmap, then read them
    # back via get-osdmap and check they took effect.  Returns error count.
    print("Testing get-osdmap and set-osdmap")
    errors = 0
    kill_daemons()
    weight = 1 / math.e  # just some magic number in [0, 1]
    changed = []
    for store in osd_paths:
        if not set_osd_weight(CFSD_PREFIX, osd_ids, store, weight):
            logging.warning("Failed to change the weights: {0}".format(store))
            continue
        changed.append(store)
    # it is a failure if no store at all got changed
    if not changed:
        errors += 1

    for store in changed:
        weights = get_osd_weights(CFSD_PREFIX, osd_ids, store)
        if not weights:
            errors += 1
            continue
        if any(abs(w - weight) > 1e-5 for w in weights):
            logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
            errors += 1
    return errors
550
def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
    """Exercise get-inc-osdmap/set-inc-osdmap by overwriting an epoch and
    reading it back; restores the original incremental before returning.
    Returns the number of errors (1 also on any setup-step failure)."""
    # incrementals are not used unless we need to build an MOSDMap to update
    # OSD's peers, so an obvious way to test it is simply overwrite an epoch
    # with a different copy, and read it back to see if it matches.
    kill_daemons()
    file_e2 = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
                                                                    file=file_e2.name)
    output = check_output(cmd, shell=True)
    # the tool prints the epoch of the newest incremental as "#<n>"
    epoch = int(re.findall('#(\d+)', output)[0])
    # backup e1 incremental before overwriting it
    epoch -= 1
    file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # overwrite e1 with e2
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
    if ret: return 1
    # Use dry-run to set back to e1 which shouldn't happen
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # read from e1
    file_e1_read = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
    if ret: return 1
    errors = 0
    try:
        # e1 was overwritten with e2, so reading e1 must now yield e2's bytes
        if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
            logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
            errors += 1
    finally:
        # revert the change with file_e1_backup
        cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
        ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
        if ret:
            logging.error("Failed to revert the changed inc-osdmap")
            errors += 1

    return errors
594
595
def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
    """Exercise the 'remove'/'removeall' ops of ceph-objectstore-tool.

    With daemons stopped, for every object in `db` present on each osd:
    check that plain 'remove' refuses objects that have snapshots, that
    --force/--dry-run variants succeed, that real 'removeall' removes the
    object, then restart the cluster and drop the test snapshot.
    Returns the number of errors found.
    """
    # Test removeall
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    nullfd = open(os.devnull, "w")
    errors=0
    print("Test removeall")
    kill_daemons()
    for nspace in db.keys():
        for basename in db[nspace].keys():
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # object filenames encode "<name>_..._<nspace>_..." fields
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue

                    # cloned objects must reject a plain 'remove'
                    if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
                        cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
                        errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")

                    cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("remove with --force failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # the real removal
                    cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # verify via --op list that nothing is left
                    tmpfd = open(TMPFILE, "w")
                    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=tmpfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
                        errors += 1
                    tmpfd.close()
                    lines = get_lines(TMPFILE)
                    if len(lines) != 0:
                        logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
                        errors += 1
    # bring the cluster back up and drop the snapshot used by the test
    vstart(new=False)
    wait_for_health()
    cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    if ret != 0:
        logging.error("rados rmsnap failed")
        errors += 1
    time.sleep(2)
    wait_for_health()
    return errors
663
664
665 def main(argv):
666 if sys.version_info[0] < 3:
667 sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
668 else:
669 stdout = sys.stdout.buffer
670 if len(argv) > 1 and argv[1] == "debug":
671 nullfd = stdout
672 else:
673 nullfd = DEVNULL
674
675 call("rm -fr {dir}; mkdir {dir}".format(dir=CEPH_DIR), shell=True)
676 os.environ["CEPH_DIR"] = CEPH_DIR
677 OSDDIR = os.path.join(CEPH_DIR, "dev")
678 REP_POOL = "rep_pool"
679 REP_NAME = "REPobject"
680 EC_POOL = "ec_pool"
681 EC_NAME = "ECobject"
682 if len(argv) > 0 and argv[0] == 'large':
683 PG_COUNT = 12
684 NUM_REP_OBJECTS = 800
685 NUM_CLONED_REP_OBJECTS = 100
686 NUM_EC_OBJECTS = 12
687 NUM_NSPACES = 4
688 # Larger data sets for first object per namespace
689 DATALINECOUNT = 50000
690 # Number of objects to do xattr/omap testing on
691 ATTR_OBJS = 10
692 else:
693 PG_COUNT = 4
694 NUM_REP_OBJECTS = 2
695 NUM_CLONED_REP_OBJECTS = 2
696 NUM_EC_OBJECTS = 2
697 NUM_NSPACES = 2
698 # Larger data sets for first object per namespace
699 DATALINECOUNT = 10
700 # Number of objects to do xattr/omap testing on
701 ATTR_OBJS = 2
702 ERRORS = 0
703 pid = os.getpid()
704 TESTDIR = "/tmp/test.{pid}".format(pid=pid)
705 DATADIR = "/tmp/data.{pid}".format(pid=pid)
706 CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
707 PROFNAME = "testecprofile"
708
709 os.environ['CEPH_CONF'] = CEPH_CONF
710 vstart(new=True)
711 wait_for_health()
712
713 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
714 logging.debug(cmd)
715 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
716 REPID = get_pool_id(REP_POOL, nullfd)
717
718 print("Created Replicated pool #{repid}".format(repid=REPID))
719
720 cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
721 logging.debug(cmd)
722 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
723 cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
724 logging.debug(cmd)
725 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
726 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
727 logging.debug(cmd)
728 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
729 ECID = get_pool_id(EC_POOL, nullfd)
730
731 print("Created Erasure coded pool #{ecid}".format(ecid=ECID))
732
733 print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
734 cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
735 logging.debug(cmd)
736 call(cmd, shell=True)
737
738 db = {}
739
740 objects = range(1, NUM_REP_OBJECTS + 1)
741 nspaces = range(NUM_NSPACES)
742 for n in nspaces:
743 nspace = get_nspace(n)
744
745 db[nspace] = {}
746
747 for i in objects:
748 NAME = REP_NAME + "{num}".format(num=i)
749 LNAME = nspace + "-" + NAME
750 DDNAME = os.path.join(DATADIR, LNAME)
751 DDNAME += "__head"
752
753 cmd = "rm -f " + DDNAME
754 logging.debug(cmd)
755 call(cmd, shell=True)
756
757 if i == 1:
758 dataline = range(DATALINECOUNT)
759 else:
760 dataline = range(1)
761 fd = open(DDNAME, "w")
762 data = "This is the replicated data for " + LNAME + "\n"
763 for _ in dataline:
764 fd.write(data)
765 fd.close()
766
767 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
768 logging.debug(cmd)
769 ret = call(cmd, shell=True, stderr=nullfd)
770 if ret != 0:
771 logging.critical("Rados put command failed with {ret}".format(ret=ret))
772 return 1
773
774 db[nspace][NAME] = {}
775
776 if i < ATTR_OBJS + 1:
777 keys = range(i)
778 else:
779 keys = range(0)
780 db[nspace][NAME]["xattr"] = {}
781 for k in keys:
782 if k == 0:
783 continue
784 mykey = "key{i}-{k}".format(i=i, k=k)
785 myval = "val{i}-{k}".format(i=i, k=k)
786 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
787 logging.debug(cmd)
788 ret = call(cmd, shell=True)
789 if ret != 0:
790 logging.error("setxattr failed with {ret}".format(ret=ret))
791 ERRORS += 1
792 db[nspace][NAME]["xattr"][mykey] = myval
793
794 # Create omap header in all objects but REPobject1
795 if i < ATTR_OBJS + 1 and i != 1:
796 myhdr = "hdr{i}".format(i=i)
797 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
798 logging.debug(cmd)
799 ret = call(cmd, shell=True)
800 if ret != 0:
801 logging.critical("setomapheader failed with {ret}".format(ret=ret))
802 ERRORS += 1
803 db[nspace][NAME]["omapheader"] = myhdr
804
805 db[nspace][NAME]["omap"] = {}
806 for k in keys:
807 if k == 0:
808 continue
809 mykey = "okey{i}-{k}".format(i=i, k=k)
810 myval = "oval{i}-{k}".format(i=i, k=k)
811 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
812 logging.debug(cmd)
813 ret = call(cmd, shell=True)
814 if ret != 0:
815 logging.critical("setomapval failed with {ret}".format(ret=ret))
816 db[nspace][NAME]["omap"][mykey] = myval
817
818 # Create some clones
819 cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
820 logging.debug(cmd)
821 call(cmd, shell=True)
822
823 objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
824 nspaces = range(NUM_NSPACES)
825 for n in nspaces:
826 nspace = get_nspace(n)
827
828 for i in objects:
829 NAME = REP_NAME + "{num}".format(num=i)
830 LNAME = nspace + "-" + NAME
831 DDNAME = os.path.join(DATADIR, LNAME)
832 # First clone
833 CLONENAME = DDNAME + "__1"
834 DDNAME += "__head"
835
836 cmd = "mv -f " + DDNAME + " " + CLONENAME
837 logging.debug(cmd)
838 call(cmd, shell=True)
839
840 if i == 1:
841 dataline = range(DATALINECOUNT)
842 else:
843 dataline = range(1)
844 fd = open(DDNAME, "w")
845 data = "This is the replicated data after a snapshot for " + LNAME + "\n"
846 for _ in dataline:
847 fd.write(data)
848 fd.close()
849
850 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
851 logging.debug(cmd)
852 ret = call(cmd, shell=True, stderr=nullfd)
853 if ret != 0:
854 logging.critical("Rados put command failed with {ret}".format(ret=ret))
855 return 1
856
857 print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))
858
859 objects = range(1, NUM_EC_OBJECTS + 1)
860 nspaces = range(NUM_NSPACES)
861 for n in nspaces:
862 nspace = get_nspace(n)
863
864 for i in objects:
865 NAME = EC_NAME + "{num}".format(num=i)
866 LNAME = nspace + "-" + NAME
867 DDNAME = os.path.join(DATADIR, LNAME)
868 DDNAME += "__head"
869
870 cmd = "rm -f " + DDNAME
871 logging.debug(cmd)
872 call(cmd, shell=True)
873
874 if i == 1:
875 dataline = range(DATALINECOUNT)
876 else:
877 dataline = range(1)
878 fd = open(DDNAME, "w")
879 data = "This is the erasure coded data for " + LNAME + "\n"
880 for j in dataline:
881 fd.write(data)
882 fd.close()
883
884 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
885 logging.debug(cmd)
886 ret = call(cmd, shell=True, stderr=nullfd)
887 if ret != 0:
888 logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret))
889 return 1
890
891 db[nspace][NAME] = {}
892
893 db[nspace][NAME]["xattr"] = {}
894 if i < ATTR_OBJS + 1:
895 keys = range(i)
896 else:
897 keys = range(0)
898 for k in keys:
899 if k == 0:
900 continue
901 mykey = "key{i}-{k}".format(i=i, k=k)
902 myval = "val{i}-{k}".format(i=i, k=k)
903 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
904 logging.debug(cmd)
905 ret = call(cmd, shell=True)
906 if ret != 0:
907 logging.error("setxattr failed with {ret}".format(ret=ret))
908 ERRORS += 1
909 db[nspace][NAME]["xattr"][mykey] = myval
910
911 # Omap isn't supported in EC pools
912 db[nspace][NAME]["omap"] = {}
913
914 logging.debug(db)
915
916 kill_daemons()
917
918 if ERRORS:
919 logging.critical("Unable to set up test")
920 return 1
921
922 ALLREPPGS = get_pgs(OSDDIR, REPID)
923 logging.debug(ALLREPPGS)
924 ALLECPGS = get_pgs(OSDDIR, ECID)
925 logging.debug(ALLECPGS)
926
927 OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
928 logging.debug(OBJREPPGS)
929 OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
930 logging.debug(OBJECPGS)
931
932 ONEPG = ALLREPPGS[0]
933 logging.debug(ONEPG)
934 osds = get_osds(ONEPG, OSDDIR)
935 ONEOSD = osds[0]
936 logging.debug(ONEOSD)
937
938 print("Test invalid parameters")
939 # On export can't use stdout to a terminal
940 cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
941 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
942
943 # On export can't use stdout to a terminal
944 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
945 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
946
947 # Prep a valid ec export file for import failure tests
948 ONEECPG = ALLECPGS[0]
949 osds = get_osds(ONEECPG, OSDDIR)
950 ONEECOSD = osds[0]
951 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
952 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
953 logging.debug(cmd)
954 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
955
956 # On import can't specify a different shard
957 BADPG = ONEECPG.split('s')[0] + "s10"
958 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=BADPG, file=OTHERFILE)
959 ERRORS += test_failure(cmd, "Can't specify a different shard, must be")
960
961 os.unlink(OTHERFILE)
962
963 # Prep a valid export file for import failure tests
964 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
965 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
966 logging.debug(cmd)
967 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
968
969 # On import can't specify a PG with a non-existent pool
970 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
971 ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")
972
973 # On import can't specify shard for a replicated export
974 cmd = (CFSD_PREFIX + "--op import --pgid {pg}s0 --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
975 ERRORS += test_failure(cmd, "Can't specify a sharded pgid with a non-sharded export")
976
977 # On import can't specify a PG with a bad seed
978 TMPPG="{pool}.80".format(pool=REPID)
979 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
980 ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")
981
982 os.unlink(OTHERFILE)
983 cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
984 ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))
985
986 cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
987 ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")
988
989 cmd = "{path}/ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal".format(path=CEPH_BIN)
990 ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory")
991
992 # On import can't use stdin from a terminal
993 cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
994 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
995
996 # On import can't use stdin from a terminal
997 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
998 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
999
1000 # Specify a bad --type
1001 os.mkdir(OSDDIR + "/fakeosd")
1002 cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
1003 ERRORS += test_failure(cmd, "Unable to create store of type foobar")
1004
1005 # Don't specify a data-path
1006 cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
1007 ERRORS += test_failure(cmd, "Must provide --data-path")
1008
1009 cmd = (CFSD_PREFIX + "--op remove").format(osd=ONEOSD)
1010 ERRORS += test_failure(cmd, "Must provide pgid")
1011
1012 # Don't secify a --op nor object command
1013 cmd = CFSD_PREFIX.format(osd=ONEOSD)
1014 ERRORS += test_failure(cmd, "Must provide --op or object command...")
1015
1016 # Specify a bad --op command
1017 cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
1018 ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, export, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)")
1019
1020 # Provide just the object param not a command
1021 cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
1022 ERRORS += test_failure(cmd, "Invalid syntax, missing command")
1023
1024 # Provide an object name that doesn't exist
1025 cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
1026 ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")
1027
1028 # Provide an invalid object command
1029 cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
1030 ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")
1031
1032 cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
1033 ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")
1034
1035 cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
1036 ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")
1037
1038 cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1039 ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")
1040
1041 cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1042 ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")
1043
1044 cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1045 ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")
1046
1047 cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1048 ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")
1049
1050 cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1051 ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")
1052
1053 TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
1054 ALLPGS = OBJREPPGS + OBJECPGS
1055 OSDS = get_osds(ALLPGS[0], OSDDIR)
1056 osd = OSDS[0]
1057
1058 print("Test all --op dump-journal")
1059 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1060 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1061
1062 # Test --op list and generate json for all objects
1063 print("Test --op list variants")
1064
1065 # retrieve all objects from all PGs
1066 tmpfd = open(TMPFILE, "wb")
1067 cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
1068 logging.debug(cmd)
1069 ret = call(cmd, shell=True, stdout=tmpfd)
1070 if ret != 0:
1071 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1072 ERRORS += 1
1073 tmpfd.close()
1074 lines = get_lines(TMPFILE)
1075 JSONOBJ = sorted(set(lines))
1076 (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]
1077
1078 # retrieve all objects in a given PG
1079 tmpfd = open(OTHERFILE, "ab")
1080 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
1081 logging.debug(cmd)
1082 ret = call(cmd, shell=True, stdout=tmpfd)
1083 if ret != 0:
1084 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1085 ERRORS += 1
1086 tmpfd.close()
1087 lines = get_lines(OTHERFILE)
1088 JSONOBJ = sorted(set(lines))
1089 (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]
1090
1091 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1092 logging.error("the first line of --op list is different "
1093 "from the first line of --op list --pgid {pg}".format(pg=pgid))
1094 ERRORS += 1
1095
# retrieve all objects with a given name in a given PG and verify that the
# first listing entry matches the one obtained from the unfiltered --op list.
tmpfd = open(OTHERFILE, "wb")
cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=tmpfd)
if ret != 0:
    logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
    ERRORS += 1
tmpfd.close()
lines = get_lines(OTHERFILE)
JSONOBJ = sorted(set(lines))
# Bug fix: this line previously read '(...) in json.loads(...)[0]' — an
# expression-statement membership test that discarded its result and left
# other_pgid/other_coll/other_jsondict holding stale values from the
# previous check, making the comparison below vacuous.  Use assignment.
(other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
    logging.error("the first line of --op list is different "
                  "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
    ERRORS += 1
1113
1114 print("Test --op list by generating json for all objects using default format")
1115 for pg in ALLPGS:
1116 OSDS = get_osds(pg, OSDDIR)
1117 for osd in OSDS:
1118 tmpfd = open(TMPFILE, "ab")
1119 cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
1120 logging.debug(cmd)
1121 ret = call(cmd, shell=True, stdout=tmpfd)
1122 if ret != 0:
1123 logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
1124 ERRORS += 1
1125
1126 tmpfd.close()
1127 lines = get_lines(TMPFILE)
1128 JSONOBJ = sorted(set(lines))
1129 for JSON in JSONOBJ:
1130 (pgid, jsondict) = json.loads(JSON)
1131 # Skip clones for now
1132 if jsondict['snapid'] != -2:
1133 continue
1134 db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
1135 # print db[jsondict['namespace']][jsondict['oid']]['json']
1136 if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
1137 logging.error("Malformed JSON {json}".format(json=JSON))
1138 ERRORS += 1
1139
1140 # Test get-bytes
1141 print("Test get-bytes and set-bytes")
1142 for nspace in db.keys():
1143 for basename in db[nspace].keys():
1144 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1145 JSON = db[nspace][basename]['json']
1146 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1147 TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
1148 SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
1149 BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
1150 for pg in OBJREPPGS:
1151 OSDS = get_osds(pg, OSDDIR)
1152 for osd in OSDS:
1153 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1154 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1155 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1156 if not fnames:
1157 continue
1158 try:
1159 os.unlink(GETNAME)
1160 except:
1161 pass
1162 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
1163 logging.debug(cmd)
1164 ret = call(cmd, shell=True)
1165 if ret != 0:
1166 logging.error("Bad exit status {ret}".format(ret=ret))
1167 ERRORS += 1
1168 continue
1169 cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
1170 ret = call(cmd, shell=True)
1171 if ret != 0:
1172 logging.error("Data from get-bytes differ")
1173 logging.debug("Got:")
1174 cat_file(logging.DEBUG, GETNAME)
1175 logging.debug("Expected:")
1176 cat_file(logging.DEBUG, file)
1177 ERRORS += 1
1178 fd = open(SETNAME, "w")
1179 data = "put-bytes going into {file}\n".format(file=file)
1180 fd.write(data)
1181 fd.close()
1182 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
1183 logging.debug(cmd)
1184 ret = call(cmd, shell=True)
1185 if ret != 0:
1186 logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
1187 ERRORS += 1
1188 fd = open(TESTNAME, "wb")
1189 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1190 logging.debug(cmd)
1191 ret = call(cmd, shell=True, stdout=fd)
1192 fd.close()
1193 if ret != 0:
1194 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1195 ERRORS += 1
1196 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1197 logging.debug(cmd)
1198 ret = call(cmd, shell=True)
1199 if ret != 0:
1200 logging.error("Data after set-bytes differ")
1201 logging.debug("Got:")
1202 cat_file(logging.DEBUG, TESTNAME)
1203 logging.debug("Expected:")
1204 cat_file(logging.DEBUG, SETNAME)
1205 ERRORS += 1
1206
1207 # Use set-bytes with --dry-run and make sure contents haven't changed
1208 fd = open(BADNAME, "w")
1209 data = "Bad data for --dry-run in {file}\n".format(file=file)
1210 fd.write(data)
1211 fd.close()
1212 cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
1213 logging.debug(cmd)
1214 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1215 if ret != 0:
1216 logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
1217 ERRORS += 1
1218 fd = open(TESTNAME, "wb")
1219 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1220 logging.debug(cmd)
1221 ret = call(cmd, shell=True, stdout=fd)
1222 fd.close()
1223 if ret != 0:
1224 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1225 ERRORS += 1
1226 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1227 logging.debug(cmd)
1228 ret = call(cmd, shell=True)
1229 if ret != 0:
1230 logging.error("Data after set-bytes --dry-run changed!")
1231 logging.debug("Got:")
1232 cat_file(logging.DEBUG, TESTNAME)
1233 logging.debug("Expected:")
1234 cat_file(logging.DEBUG, SETNAME)
1235 ERRORS += 1
1236
1237 fd = open(file, "rb")
1238 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
1239 logging.debug(cmd)
1240 ret = call(cmd, shell=True, stdin=fd)
1241 if ret != 0:
1242 logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
1243 ERRORS += 1
1244 fd.close()
1245
1246 try:
1247 os.unlink(GETNAME)
1248 except:
1249 pass
1250 try:
1251 os.unlink(TESTNAME)
1252 except:
1253 pass
1254 try:
1255 os.unlink(SETNAME)
1256 except:
1257 pass
1258 try:
1259 os.unlink(BADNAME)
1260 except:
1261 pass
1262
1263 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1264 print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
1265 for nspace in db.keys():
1266 for basename in db[nspace].keys():
1267 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1268 JSON = db[nspace][basename]['json']
1269 for pg in OBJREPPGS:
1270 OSDS = get_osds(pg, OSDDIR)
1271 for osd in OSDS:
1272 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1273 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1274 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1275 if not fnames:
1276 continue
1277 for key, val in db[nspace][basename]["xattr"].items():
1278 attrkey = "_" + key
1279 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
1280 logging.debug(cmd)
1281 getval = check_output(cmd, shell=True)
1282 if getval != val:
1283 logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
1284 ERRORS += 1
1285 continue
1286 # set-attr to bogus value "foobar"
1287 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1288 logging.debug(cmd)
1289 ret = call(cmd, shell=True)
1290 if ret != 0:
1291 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1292 ERRORS += 1
1293 continue
1294 # Test set-attr with dry-run
1295 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1296 logging.debug(cmd)
1297 ret = call(cmd, shell=True, stdout=nullfd)
1298 if ret != 0:
1299 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1300 ERRORS += 1
1301 continue
1302 # Check the set-attr
1303 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1304 logging.debug(cmd)
1305 getval = check_output(cmd, shell=True)
1306 if ret != 0:
1307 logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
1308 ERRORS += 1
1309 continue
1310 if getval != "foobar":
1311 logging.error("Check of set-attr failed because we got {val}".format(val=getval))
1312 ERRORS += 1
1313 continue
1314 # Test rm-attr
1315 cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1316 logging.debug(cmd)
1317 ret = call(cmd, shell=True)
1318 if ret != 0:
1319 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1320 ERRORS += 1
1321 continue
1322 # Check rm-attr with dry-run
1323 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1324 logging.debug(cmd)
1325 ret = call(cmd, shell=True, stdout=nullfd)
1326 if ret != 0:
1327 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1328 ERRORS += 1
1329 continue
1330 cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1331 logging.debug(cmd)
1332 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1333 if ret == 0:
1334 logging.error("For rm-attr expect get-attr to fail, but it succeeded")
1335 ERRORS += 1
1336 # Put back value
1337 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
1338 logging.debug(cmd)
1339 ret = call(cmd, shell=True)
1340 if ret != 0:
1341 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1342 ERRORS += 1
1343 continue
1344
1345 hdr = db[nspace][basename].get("omapheader", "")
1346 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1347 logging.debug(cmd)
1348 gethdr = check_output(cmd, shell=True)
1349 if gethdr != hdr:
1350 logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1351 ERRORS += 1
1352 continue
1353 # set-omaphdr to bogus value "foobar"
1354 cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1355 logging.debug(cmd)
1356 ret = call(cmd, shell=True)
1357 if ret != 0:
1358 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1359 ERRORS += 1
1360 continue
1361 # Check the set-omaphdr
1362 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1363 logging.debug(cmd)
1364 gethdr = check_output(cmd, shell=True)
1365 if ret != 0:
1366 logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1367 ERRORS += 1
1368 continue
1369 if gethdr != "foobar":
1370 logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1371 ERRORS += 1
1372 continue
1373 # Test dry-run with set-omaphdr
1374 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1375 logging.debug(cmd)
1376 ret = call(cmd, shell=True, stdout=nullfd)
1377 if ret != 0:
1378 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1379 ERRORS += 1
1380 continue
1381 # Put back value
1382 cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1383 logging.debug(cmd)
1384 ret = call(cmd, shell=True)
1385 if ret != 0:
1386 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1387 ERRORS += 1
1388 continue
1389
1390 for omapkey, val in db[nspace][basename]["omap"].items():
1391 cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
1392 logging.debug(cmd)
1393 getval = check_output(cmd, shell=True)
1394 if getval != val:
1395 logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
1396 ERRORS += 1
1397 continue
1398 # set-omap to bogus value "foobar"
1399 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1400 logging.debug(cmd)
1401 ret = call(cmd, shell=True)
1402 if ret != 0:
1403 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1404 ERRORS += 1
1405 continue
1406 # Check set-omap with dry-run
1407 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1408 logging.debug(cmd)
1409 ret = call(cmd, shell=True, stdout=nullfd)
1410 if ret != 0:
1411 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1412 ERRORS += 1
1413 continue
1414 # Check the set-omap
1415 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1416 logging.debug(cmd)
1417 getval = check_output(cmd, shell=True)
1418 if ret != 0:
1419 logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
1420 ERRORS += 1
1421 continue
1422 if getval != "foobar":
1423 logging.error("Check of set-omap failed because we got {val}".format(val=getval))
1424 ERRORS += 1
1425 continue
1426 # Test rm-omap
1427 cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1428 logging.debug(cmd)
1429 ret = call(cmd, shell=True)
1430 if ret != 0:
1431 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1432 ERRORS += 1
1433 # Check rm-omap with dry-run
1434 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1435 logging.debug(cmd)
1436 ret = call(cmd, shell=True, stdout=nullfd)
1437 if ret != 0:
1438 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1439 ERRORS += 1
1440 cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1441 logging.debug(cmd)
1442 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1443 if ret == 0:
1444 logging.error("For rm-omap expect get-omap to fail, but it succeeded")
1445 ERRORS += 1
1446 # Put back value
1447 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
1448 logging.debug(cmd)
1449 ret = call(cmd, shell=True)
1450 if ret != 0:
1451 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1452 ERRORS += 1
1453 continue
1454
1455 # Test dump
1456 print("Test dump")
1457 for nspace in db.keys():
1458 for basename in db[nspace].keys():
1459 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1460 JSON = db[nspace][basename]['json']
1461 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1462 for pg in OBJREPPGS:
1463 OSDS = get_osds(pg, OSDDIR)
1464 for osd in OSDS:
1465 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1466 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1467 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1468 if not fnames:
1469 continue
1470 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1471 continue
1472 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1473 logging.debug(cmd)
1474 ret = call(cmd, shell=True)
1475 if ret != 0:
1476 logging.error("Invalid dump for {json}".format(json=JSON))
1477 ERRORS += 1
1478
1479 print("Test list-attrs get-attr")
1480 ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
1481 VALFILE = r"/tmp/val.{pid}".format(pid=pid)
1482 for nspace in db.keys():
1483 for basename in db[nspace].keys():
1484 file = os.path.join(DATADIR, nspace + "-" + basename)
1485 JSON = db[nspace][basename]['json']
1486 jsondict = json.loads(JSON)
1487
1488 if 'shard_id' in jsondict:
1489 logging.debug("ECobject " + JSON)
1490 found = 0
1491 for pg in OBJECPGS:
1492 OSDS = get_osds(pg, OSDDIR)
1493 # Fix shard_id since we only have one json instance for each object
1494 jsondict['shard_id'] = int(pg.split('s')[1])
1495 JSON = json.dumps(jsondict)
1496 for osd in OSDS:
1497 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr hinfo_key").format(osd=osd, pg=pg, json=JSON)
1498 logging.debug("TRY: " + cmd)
1499 try:
1500 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1501 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1502 found += 1
1503 except subprocess.CalledProcessError as e:
1504 if "No such file or directory" not in e.output and "No data available" not in e.output:
1505 raise
1506 # Assuming k=2 m=1 for the default ec pool
1507 if found != 3:
1508 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1509 ERRORS += 1
1510
1511 for pg in ALLPGS:
1512 # Make sure rep obj with rep pg or ec obj with ec pg
1513 if ('shard_id' in jsondict) != (pg.find('s') > 0):
1514 continue
1515 if 'shard_id' in jsondict:
1516 # Fix shard_id since we only have one json instance for each object
1517 jsondict['shard_id'] = int(pg.split('s')[1])
1518 JSON = json.dumps(jsondict)
1519 OSDS = get_osds(pg, OSDDIR)
1520 for osd in OSDS:
1521 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1522 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1523 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1524 if not fnames:
1525 continue
1526 afd = open(ATTRFILE, "wb")
1527 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' list-attrs").format(osd=osd, pg=pg, json=JSON)
1528 logging.debug(cmd)
1529 ret = call(cmd, shell=True, stdout=afd)
1530 afd.close()
1531 if ret != 0:
1532 logging.error("list-attrs failed with {ret}".format(ret=ret))
1533 ERRORS += 1
1534 continue
1535 keys = get_lines(ATTRFILE)
1536 values = dict(db[nspace][basename]["xattr"])
1537 for key in keys:
1538 if key == "_" or key == "snapset" or key == "hinfo_key":
1539 continue
1540 key = key.strip("_")
1541 if key not in values:
1542 logging.error("Unexpected key {key} present".format(key=key))
1543 ERRORS += 1
1544 continue
1545 exp = values.pop(key)
1546 vfd = open(VALFILE, "wb")
1547 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key="_" + key)
1548 logging.debug(cmd)
1549 ret = call(cmd, shell=True, stdout=vfd)
1550 vfd.close()
1551 if ret != 0:
1552 logging.error("get-attr failed with {ret}".format(ret=ret))
1553 ERRORS += 1
1554 continue
1555 lines = get_lines(VALFILE)
1556 val = lines[0]
1557 if exp != val:
1558 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1559 ERRORS += 1
1560 if len(values) != 0:
1561 logging.error("Not all keys found, remaining keys:")
1562 print(values)
1563
1564 print("Test --op meta-list")
1565 tmpfd = open(TMPFILE, "wb")
1566 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1567 logging.debug(cmd)
1568 ret = call(cmd, shell=True, stdout=tmpfd)
1569 if ret != 0:
1570 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1571 ERRORS += 1
1572
1573 print("Test get-bytes on meta")
1574 tmpfd.close()
1575 lines = get_lines(TMPFILE)
1576 JSONOBJ = sorted(set(lines))
1577 for JSON in JSONOBJ:
1578 (pgid, jsondict) = json.loads(JSON)
1579 if pgid != "meta":
1580 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1581 ERRORS += 1
1582 if jsondict['namespace'] != "":
1583 logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
1584 ERRORS += 1
1585 logging.info(JSON)
1586 try:
1587 os.unlink(GETNAME)
1588 except:
1589 pass
1590 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1591 logging.debug(cmd)
1592 ret = call(cmd, shell=True)
1593 if ret != 0:
1594 logging.error("Bad exit status {ret}".format(ret=ret))
1595 ERRORS += 1
1596
1597 try:
1598 os.unlink(GETNAME)
1599 except:
1600 pass
1601 try:
1602 os.unlink(TESTNAME)
1603 except:
1604 pass
1605
1606 print("Test pg info")
1607 for pg in ALLREPPGS + ALLECPGS:
1608 for osd in get_osds(pg, OSDDIR):
1609 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1610 logging.debug(cmd)
1611 ret = call(cmd, shell=True, stdout=nullfd)
1612 if ret != 0:
1613 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1614 ERRORS += 1
1615
1616 print("Test pg logging")
1617 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
1618 logging.warning("All PGs have objects, so no log without modify entries")
1619 for pg in ALLREPPGS + ALLECPGS:
1620 for osd in get_osds(pg, OSDDIR):
1621 tmpfd = open(TMPFILE, "wb")
1622 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1623 logging.debug(cmd)
1624 ret = call(cmd, shell=True, stdout=tmpfd)
1625 if ret != 0:
1626 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1627 ERRORS += 1
1628 HASOBJ = pg in OBJREPPGS + OBJECPGS
1629 MODOBJ = False
1630 for line in get_lines(TMPFILE):
1631 if line.find("modify") != -1:
1632 MODOBJ = True
1633 break
1634 if HASOBJ != MODOBJ:
1635 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
1636 MSG = (HASOBJ and [""] or ["NOT "])[0]
1637 print("Log should {msg}have a modify entry".format(msg=MSG))
1638 ERRORS += 1
1639
1640 try:
1641 os.unlink(TMPFILE)
1642 except:
1643 pass
1644
1645 print("Test list-pgs")
1646 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1647
1648 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1649 CHECK_PGS = sorted(CHECK_PGS)
1650
1651 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1652 logging.debug(cmd)
1653 TEST_PGS = check_output(cmd, shell=True).split("\n")
1654 TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line
1655
1656 if TEST_PGS != CHECK_PGS:
1657 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1658 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1659 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1660 ERRORS += 1
1661
1662 EXP_ERRORS = 0
1663 print("Test pg export --dry-run")
1664 pg = ALLREPPGS[0]
1665 osd = get_osds(pg, OSDDIR)[0]
1666 fname = "/tmp/fname.{pid}".format(pid=pid)
1667 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1668 logging.debug(cmd)
1669 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1670 if ret != 0:
1671 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1672 EXP_ERRORS += 1
1673 elif os.path.exists(fname):
1674 logging.error("Exporting --dry-run created file")
1675 EXP_ERRORS += 1
1676
# Exporting with --dry-run to stdout must produce no data at all.
cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
if ret != 0:
    logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
    EXP_ERRORS += 1
else:
    outdata = get_lines(fname)
    if len(outdata) > 0:
        logging.error("Exporting --dry-run to stdout not empty")
        # Bug fix: get_lines() returns a list of lines, so the previous
        # '"Data: " + outdata' raised TypeError (str + list) exactly when
        # this error path fired.  Format the list explicitly instead.
        logging.error("Data: {data}".format(data=outdata))
        EXP_ERRORS += 1
1689
1690 os.mkdir(TESTDIR)
1691 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1692 os.mkdir(os.path.join(TESTDIR, osd))
1693 print("Test pg export")
1694 for pg in ALLREPPGS + ALLECPGS:
1695 for osd in get_osds(pg, OSDDIR):
1696 mydir = os.path.join(TESTDIR, osd)
1697 fname = os.path.join(mydir, pg)
1698 if pg == ALLREPPGS[0]:
1699 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1700 elif pg == ALLREPPGS[1]:
1701 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1702 else:
1703 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1704 logging.debug(cmd)
1705 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1706 if ret != 0:
1707 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1708 EXP_ERRORS += 1
1709
1710 ERRORS += EXP_ERRORS
1711
1712 print("Test pg removal")
1713 RM_ERRORS = 0
1714 for pg in ALLREPPGS + ALLECPGS:
1715 for osd in get_osds(pg, OSDDIR):
1716 # This should do nothing
1717 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1718 logging.debug(cmd)
1719 ret = call(cmd, shell=True, stdout=nullfd)
1720 if ret != 0:
1721 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1722 RM_ERRORS += 1
1723 cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
1724 logging.debug(cmd)
1725 ret = call(cmd, shell=True, stdout=nullfd)
1726 if ret != 0:
1727 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1728 RM_ERRORS += 1
1729
1730 ERRORS += RM_ERRORS
1731
1732 IMP_ERRORS = 0
1733 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1734 print("Test pg import")
1735 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1736 dir = os.path.join(TESTDIR, osd)
1737 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1738 for pg in PGS:
1739 file = os.path.join(dir, pg)
1740 # This should do nothing
1741 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1742 logging.debug(cmd)
1743 ret = call(cmd, shell=True, stdout=nullfd)
1744 if ret != 0:
1745 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1746 IMP_ERRORS += 1
1747 if pg == PGS[0]:
1748 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1749 elif pg == PGS[1]:
1750 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1751 else:
1752 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1753 logging.debug(cmd)
1754 ret = call(cmd, shell=True, stdout=nullfd)
1755 if ret != 0:
1756 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1757 IMP_ERRORS += 1
1758 else:
1759 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1760
1761 ERRORS += IMP_ERRORS
1762 logging.debug(cmd)
1763
1764 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1765 print("Verify replicated import data")
1766 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1767 ERRORS += data_errors
1768 else:
1769 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1770
1771 print("Test all --op dump-journal again")
1772 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1773 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1774
1775 vstart(new=False)
1776 wait_for_health()
1777
1778 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1779 print("Verify erasure coded import data")
1780 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1781 # Check replicated data/xattr/omap using rados
1782 print("Verify replicated import data using rados")
1783 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1784
1785 if EXP_ERRORS == 0:
1786 NEWPOOL = "rados-import-pool"
1787 cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
1788 logging.debug(cmd)
1789 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1790
1791 print("Test rados import")
1792 first = True
1793 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1794 dir = os.path.join(TESTDIR, osd)
1795 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1796 if pg.find("{id}.".format(id=REPID)) != 0:
1797 continue
1798 file = os.path.join(dir, pg)
1799 if first:
1800 first = False
1801 # This should do nothing
1802 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1803 logging.debug(cmd)
1804 ret = call(cmd, shell=True, stdout=nullfd)
1805 if ret != 0:
1806 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1807 ERRORS += 1
1808 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1809 logging.debug(cmd)
1810 data = check_output(cmd, shell=True)
1811 if data:
1812 logging.error("'{data}'".format(data=data))
1813 logging.error("Found objects after dry-run")
1814 ERRORS += 1
1815 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1816 logging.debug(cmd)
1817 ret = call(cmd, shell=True, stdout=nullfd)
1818 if ret != 0:
1819 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1820 ERRORS += 1
1821 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1822 logging.debug(cmd)
1823 ret = call(cmd, shell=True, stdout=nullfd)
1824 if ret != 0:
1825 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1826 ERRORS += 1
1827
1828 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1829 else:
1830 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1831
1832 # Clear directories of previous portion
1833 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1834 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1835 os.mkdir(TESTDIR)
1836 os.mkdir(DATADIR)
1837
1838 # Cause SPLIT_POOL to split and test import with object/log filtering
1839 print("Testing import all objects after a split")
1840 SPLIT_POOL = "split_pool"
1841 PG_COUNT = 1
1842 SPLIT_OBJ_COUNT = 5
1843 SPLIT_NSPACE_COUNT = 2
1844 SPLIT_NAME = "split"
1845 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1846 logging.debug(cmd)
1847 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1848 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1849 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1850 EXP_ERRORS = 0
1851 RM_ERRORS = 0
1852 IMP_ERRORS = 0
1853
1854 objects = range(1, SPLIT_OBJ_COUNT + 1)
1855 nspaces = range(SPLIT_NSPACE_COUNT)
1856 for n in nspaces:
1857 nspace = get_nspace(n)
1858
1859 for i in objects:
1860 NAME = SPLIT_NAME + "{num}".format(num=i)
1861 LNAME = nspace + "-" + NAME
1862 DDNAME = os.path.join(DATADIR, LNAME)
1863 DDNAME += "__head"
1864
1865 cmd = "rm -f " + DDNAME
1866 logging.debug(cmd)
1867 call(cmd, shell=True)
1868
1869 if i == 1:
1870 dataline = range(DATALINECOUNT)
1871 else:
1872 dataline = range(1)
1873 fd = open(DDNAME, "w")
1874 data = "This is the split data for " + LNAME + "\n"
1875 for _ in dataline:
1876 fd.write(data)
1877 fd.close()
1878
1879 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1880 logging.debug(cmd)
1881 ret = call(cmd, shell=True, stderr=nullfd)
1882 if ret != 0:
1883 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1884 return 1
1885
1886 wait_for_health()
1887 kill_daemons()
1888
1889 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1890 os.mkdir(os.path.join(TESTDIR, osd))
1891
1892 pg = "{pool}.0".format(pool=SPLITID)
1893 EXPORT_PG = pg
1894
1895 export_osds = get_osds(pg, OSDDIR)
1896 for osd in export_osds:
1897 mydir = os.path.join(TESTDIR, osd)
1898 fname = os.path.join(mydir, pg)
1899 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1900 logging.debug(cmd)
1901 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1902 if ret != 0:
1903 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1904 EXP_ERRORS += 1
1905
1906 ERRORS += EXP_ERRORS
1907
1908 if EXP_ERRORS == 0:
1909 vstart(new=False)
1910 wait_for_health()
1911
1912 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1913 logging.debug(cmd)
1914 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1915 time.sleep(5)
1916 wait_for_health()
1917
1918 kill_daemons()
1919
1920 # Now 2 PGs, poolid.0 and poolid.1
1921 for seed in range(2):
1922 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1923
1924 which = 0
1925 for osd in get_osds(pg, OSDDIR):
1926 cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
1927 logging.debug(cmd)
1928 ret = call(cmd, shell=True, stdout=nullfd)
1929
1930 # This is weird. The export files are based on only the EXPORT_PG
1931 # and where that pg was before the split. Use 'which' to use all
1932 # export copies in import.
1933 mydir = os.path.join(TESTDIR, export_osds[which])
1934 fname = os.path.join(mydir, EXPORT_PG)
1935 which += 1
1936 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1937 logging.debug(cmd)
1938 ret = call(cmd, shell=True, stdout=nullfd)
1939 if ret != 0:
1940 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1941 IMP_ERRORS += 1
1942
1943 ERRORS += IMP_ERRORS
1944
1945 # Start up again to make sure imports didn't corrupt anything
1946 if IMP_ERRORS == 0:
1947 print("Verify split import data")
1948 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
1949 ERRORS += data_errors
1950 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
1951 logging.error("Incorrect number of replicas seen {count}".format(count=count))
1952 ERRORS += 1
1953 vstart(new=False)
1954 wait_for_health()
1955
1956 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1957 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1958
1959 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
1960
1961 # vstart() starts 4 OSDs
1962 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
1963 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
1964 if ERRORS == 0:
1965 print("TEST PASSED")
1966 return 0
1967 else:
1968 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
1969 return 1
1970
1971
def remove_btrfs_subvolumes(path):
    """Delete every btrfs subvolume found under *path* (best effort).

    Cleanup helper: a plain ``rm -fr`` cannot remove a directory that
    still contains btrfs subvolumes, so they are deleted first.  This is
    a no-op on FreeBSD (no btrfs) or when *path* is not on btrfs.

    :param path: directory tree to scan for btrfs subvolumes
    """
    if platform.system() == "FreeBSD":
        return
    # Ask stat for the filesystem type holding 'path' (e.g. "btrfs").
    proc = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True,
                            stdout=subprocess.PIPE)
    # communicate() waits for the child (no zombie) and closes the pipe
    # (original code leaked both).
    out, _ = proc.communicate()
    if decode(out).strip() != "btrfs":
        return
    proc = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True,
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    for line in decode(out).splitlines():
        # Each row looks like:
        #   ID <n> gen <n> top level <n> path <subvolume>
        # so field 8 is the subvolume path relative to the fs root.
        fields = line.split()
        if len(fields) < 9:
            # Skip blank/unexpected lines instead of raising IndexError.
            continue
        subvolume = fields[8]
        # Reduce the fs-root-relative name to the part starting at 'path'.
        m = re.search(".*(%s.*)" % path, subvolume)
        if m:
            found = m.group(1)
            call("sudo btrfs subvolume delete %s" % found, shell=True)
1987
1988
if __name__ == "__main__":
    # Default to failure so an exception inside main() still exits non-zero.
    exit_status = 1
    try:
        exit_status = main(sys.argv[1:])
    finally:
        # Tear down no matter how main() ended: stop the daemons, then
        # clear btrfs subvolumes so the final recursive rm can succeed.
        kill_daemons()
        remove_btrfs_subvolumes(CEPH_DIR)
        call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
    sys.exit(exit_status)