#!/usr/bin/env python

from __future__ import print_function
from subprocess import call
try:
    from subprocess import check_output
except ImportError:
    def check_output(*popenargs, **kwargs):
        import subprocess
        # backported from python 2.7 stdlib
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
import filecmp
import os
import subprocess
import math
import time
import sys
import re
import logging
import json
import tempfile
import platform

try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)

if sys.version_info[0] >= 3:
    def decode(s):
        return s.decode('utf-8')

    def check_output(*args, **kwargs):
        return decode(subprocess.check_output(*args, **kwargs))
else:
    def decode(s):
        return s

def wait_for_health():
    print("Wait for health_ok...", end="")
    tries = 0
    while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
        tries += 1
        if tries == 150:
            raise Exception("Time exceeded to go to health")
        time.sleep(1)
    print("DONE")


def get_pool_id(name, nullfd):
    cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
    # pool {pool} id # .... grab the 4th field
    return check_output(cmd, stderr=nullfd).split()[3]
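
# Illustrative sketch (assumed, not verbatim tool output): per the comment
# above, the first line of `ceph osd pool stats <pool>` looks like
#     pool rep_pool id 1
# so splitting on whitespace and taking index 3 yields the pool id.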


# return a list of unique PGs given an osd subdirectory
def get_osd_pgs(SUBDIR, ID):
    PGS = []
    if ID:
        endhead = re.compile("{id}.*_head$".format(id=ID))
    DIR = os.path.join(SUBDIR, "current")
    PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))]
    PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p]
    return PGS
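
# Illustrative example, assuming a filestore on-disk layout: if "current"
# contains the directories
#     1.0_head  1.3_head  meta
# then get_osd_pgs(SUBDIR, "1") returns ["1.0", "1.3"]; "meta" is skipped
# because it lacks the "_head" suffix.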


# return a sorted list of unique PGs given a directory
def get_pgs(DIR, ID):
    OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    PGS = []
    for d in OSDS:
        SUBDIR = os.path.join(DIR, d)
        PGS += get_osd_pgs(SUBDIR, ID)
    return sorted(set(PGS))


# return a sorted list of PGs (a subset of ALLPGS) that contain objects with the specified prefix
def get_objs(ALLPGS, prefix, DIR, ID):
    OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    PGS = []
    for d in OSDS:
        DIRL2 = os.path.join(DIR, d)
        SUBDIR = os.path.join(DIRL2, "current")
        for p in ALLPGS:
            PGDIR = p + "_head"
            if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
                continue
            FINALDIR = os.path.join(SUBDIR, PGDIR)
            # See if there are any objects there
            if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)):
                PGS += [p]
    return sorted(set(PGS))


# return a sorted list of OSDs which have data from a given PG
def get_osds(PG, DIR):
    ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    OSDS = []
    for d in ALLOSDS:
        DIRL2 = os.path.join(DIR, d)
        SUBDIR = os.path.join(DIRL2, "current")
        PGDIR = PG + "_head"
        if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
            continue
        OSDS += [d]
    return sorted(OSDS)


def get_lines(filename):
    tmpfd = open(filename, "r")
    line = True
    lines = []
    while line:
        line = tmpfd.readline().rstrip('\n')
        if line:
            lines += [line]
    tmpfd.close()
    os.unlink(filename)
    return lines
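
# NOTE: get_lines() consumes its input: it stops reading at the first blank
# line (or EOF) and then unlinks the file, so callers treat their temp files
# as single-use.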


def cat_file(level, filename):
    if level < logging.getLogger().getEffectiveLevel():
        return
    print("File: " + filename)
    with open(filename, "r") as f:
        while True:
            line = f.readline().rstrip('\n')
            if not line:
                break
            print(line)
    print("<EOF>")


def vstart(new, opt=""):
    print("vstarting....", end="")
    NEW = new and "-n" or "-N"
    call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path}/src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
    print("DONE")


def test_failure(cmd, errmsg, tty=False):
    if tty:
        try:
            # open the tty for both reading and writing in binary mode
            ttyfd = open("/dev/tty", "rb+")
        except Exception as e:
            logging.info(str(e))
            logging.info("SKIP " + cmd)
            return 0
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    tmpfd = open(TMPFILE, "wb")

    logging.debug(cmd)
    if tty:
        ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
        ttyfd.close()
    else:
        ret = call(cmd, shell=True, stderr=tmpfd)
    tmpfd.close()
    if ret == 0:
        logging.error(cmd)
        logging.error("Should have failed, but got exit 0")
        return 1
    lines = get_lines(TMPFILE)
    matched = [l for l in lines if errmsg in l]
    if any(matched):
        logging.info("Correctly failed with message \"" + matched[0] + "\"")
        return 0
    else:
        logging.error("Command: " + cmd)
        logging.error("Bad messages to stderr \"" + str(lines) + "\"")
        logging.error("Expected \"" + errmsg + "\"")
        return 1


def get_nspace(num):
    if num == 0:
        return ""
    return "ns{num}".format(num=num)


def verify(DATADIR, POOL, NAME_PREFIX, db):
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1]
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        try:
            os.unlink(TMPFILE)
        except:
            pass
        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
        logging.debug(cmd)
        call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
        cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("{file} data not imported properly".format(file=file))
            ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
        for key, val in db[nspace][file]["xattr"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            getval = check_output(cmd, shell=True, stderr=DEVNULL)
            logging.debug("getxattr {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
                ERRORS += 1
                continue
        hdr = db[nspace][file].get("omapheader", "")
        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stderr=DEVNULL)
        if ret != 0:
            logging.error("rados getomapheader returned {ret}".format(ret=ret))
            ERRORS += 1
        else:
            getlines = get_lines(TMPFILE)
            assert(len(getlines) == 0 or len(getlines) == 1)
            if len(getlines) == 0:
                gethdr = ""
            else:
                gethdr = getlines[0]
            logging.debug("header: {hdr}".format(hdr=gethdr))
            if gethdr != hdr:
                logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                ERRORS += 1
        for key, val in db[nspace][file]["omap"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=DEVNULL)
            if ret != 0:
                logging.error("getomapval returned {ret}".format(ret=ret))
                ERRORS += 1
                continue
            getlines = get_lines(TMPFILE)
            if len(getlines) != 1:
                logging.error("Bad data from getomapval {lines}".format(lines=getlines))
                ERRORS += 1
                continue
            getval = getlines[0]
            logging.debug("getomapval {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
                ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
    return ERRORS


def check_journal(jsondict):
    errors = 0
    if 'header' not in jsondict:
        logging.error("Key 'header' not in dump-journal")
        errors += 1
    elif 'max_size' not in jsondict['header']:
        logging.error("Key 'max_size' not in dump-journal header")
        errors += 1
    else:
        print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))
    if 'entries' not in jsondict:
        logging.error("Key 'entries' not in dump-journal output")
        errors += 1
    elif len(jsondict['entries']) == 0:
        logging.info("No entries in journal found")
    else:
        errors += check_journal_entries(jsondict['entries'])
    return errors


def check_journal_entries(entries):
    errors = 0
    for enum in range(len(entries)):
        if 'offset' not in entries[enum]:
            logging.error("No 'offset' key in entry {e}".format(e=enum))
            errors += 1
        if 'seq' not in entries[enum]:
            logging.error("No 'seq' key in entry {e}".format(e=enum))
            errors += 1
        if 'transactions' not in entries[enum]:
            logging.error("No 'transactions' key in entry {e}".format(e=enum))
            errors += 1
        elif len(entries[enum]['transactions']) == 0:
            logging.error("No transactions found in entry {e}".format(e=enum))
            errors += 1
        else:
            errors += check_entry_transactions(entries[enum], enum)
    return errors


def check_entry_transactions(entry, enum):
    errors = 0
    for tnum in range(len(entry['transactions'])):
        if 'trans_num' not in entry['transactions'][tnum]:
            logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        elif entry['transactions'][tnum]['trans_num'] != tnum:
            ft = entry['transactions'][tnum]['trans_num']
            logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
            errors += 1
        if 'ops' not in entry['transactions'][tnum]:
            logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        else:
            errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum)
    return errors


def check_transaction_ops(ops, enum, tnum):
    if len(ops) == 0:
        logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
    errors = 0
    for onum in range(len(ops)):
        if 'op_num' not in ops[onum]:
            logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
        elif ops[onum]['op_num'] != onum:
            fo = ops[onum]['op_num']
            logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
            errors += 1
        if 'op_name' not in ops[onum]:
            logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
    return errors
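
# Taken together, check_journal(), check_journal_entries(),
# check_entry_transactions() and check_transaction_ops() expect --op
# dump-journal output shaped roughly like this (values are illustrative):
#
#     {"header": {"max_size": 104857600},
#      "entries": [{"offset": 4096,
#                   "seq": 1,
#                   "transactions": [{"trans_num": 0,
#                                     "ops": [{"op_num": 0,
#                                              "op_name": "write"}]}]}]}
#
# where each trans_num and op_num must equal its position in its list.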


def test_dump_journal(CFSD_PREFIX, osds):
    ERRORS = 0
    pid = os.getpid()
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)

    for osd in osds:
        # Test --op dump-journal by loading json
        cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
        logging.debug(cmd)
        tmpfd = open(TMPFILE, "wb")
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret != 0:
            logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
            ERRORS += 1
            continue
        tmpfd.close()
        tmpfd = open(TMPFILE, "r")
        jsondict = json.load(tmpfd)
        tmpfd.close()
        os.unlink(TMPFILE)

        journal_errors = check_journal(jsondict)
        if journal_errors != 0:
            logging.error(jsondict)
        ERRORS += journal_errors

    return ERRORS


CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
CEPH_BIN = os.environ.get('CEPH_BIN')
CEPH_ROOT = os.environ.get('CEPH_ROOT')

if not CEPH_BUILD_DIR:
    CEPH_BUILD_DIR = os.getcwd()
    os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
    CEPH_BIN = os.path.join(CEPH_BUILD_DIR, 'bin')
    os.putenv('CEPH_BIN', CEPH_BIN)
    CEPH_ROOT = os.path.dirname(CEPH_BUILD_DIR)
    os.putenv('CEPH_ROOT', CEPH_ROOT)
    CEPH_LIB = os.path.join(CEPH_BUILD_DIR, 'lib')
    os.putenv('CEPH_LIB', CEPH_LIB)

try:
    os.mkdir("td")
except:
    pass  # ok if this is already there

CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')


def kill_daemons():
    call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True)


def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
    repcount = 0
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1] + "__" + clone
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        tmpfd = open(TMPFILE, "wb")
        cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret:
            logging.critical("INTERNAL ERROR")
            return 1
        tmpfd.close()
        obj_locs = get_lines(TMPFILE)
        if len(obj_locs) == 0:
            logging.error("Can't find imported object {name}".format(name=file))
            ERRORS += 1
        for obj_loc in obj_locs:
            # For btrfs skip snap_* dirs
            if re.search("/snap_[0-9]*/", obj_loc) is not None:
                continue
            repcount += 1
            cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
            logging.debug(cmd)
            ret = call(cmd, shell=True)
            if ret != 0:
                logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
                ERRORS += 1
    return ERRORS, repcount
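
# Illustrative filestore object filename (assumed, not verbatim):
#     REPobject1__head_0FC1F406_ns1_1
# Split on "_" this yields ["REPobject1", "", "head", "0FC1F406", "ns1", "1"],
# which is why the find pattern above is '{file}_*_{nspace}_*' and why other
# tests compare field [0] against the object basename and field [4] against
# the namespace.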


def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
    # change the weight of the given osds to the given weight in the newest
    # osdmap stored on this osd
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    output = check_output(cmd, shell=True)
    epoch = int(re.findall(r'#(\d+)', output)[0])

    new_crush_file = tempfile.NamedTemporaryFile(delete=True)
    old_crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=old_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    for osd_id in osd_ids:
        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                              crush_file=old_crush_file.name,
                                                                                                              weight=weight,
                                                                                                              new_crush_file=new_crush_file.name, path=CEPH_BIN)
        ret = call(cmd, stdout=DEVNULL, shell=True)
        assert(ret == 0)
        old_crush_file, new_crush_file = new_crush_file, old_crush_file

    # change them back, since we don't need to prepare for another round
    old_crush_file, new_crush_file = new_crush_file, old_crush_file
    old_crush_file.close()

    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=new_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Minimal test of --dry-run: exercise it without checking the result
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    assert(ret == 0)

    # osdmaptool increases the epoch of the changed osdmap, so we need to
    # force the tool to use a different epoch than the one in the osdmap
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)

    return ret == 0
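
# In short, set_osd_weight() round-trips the newest osdmap through the
# external tools:
#     get-osdmap -> osdmaptool --export-crush -> crushtool --reweight-item
#     -> osdmaptool --import-crush -> set-osdmap (--dry-run, then for real)
# --force is needed on the way back because, per the comment above, the
# epoch being written no longer matches the epoch recorded inside the map.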


def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    if ret != 0:
        return None
    # We have to read the weights from the crush map. We could also query them
    # with osdmaptool, but keep in mind the two are different things: item
    # weights in the crush map versus the weight associated with each osd in
    # the osdmap.
    crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               shell=True)
    assert(ret == 0)
    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
                                                                                              num_osd=len(osd_ids), path=CEPH_BIN),
                          stderr=DEVNULL,
                          shell=True)
    weights = []
    for line in output.strip().split('\n'):
        print(line)
        linev = re.split(r'\s+', line)
        if linev[0] == '':
            linev.pop(0)
        print('linev %s' % linev)
        weights.append(float(linev[2]))

    return weights
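
# NOTE: the parsing above assumes the weight is the third whitespace-separated
# field of each `crushtool --tree` line (after dropping a leading empty
# field); only the trailing len(osd_ids) lines (the osd entries) are
# inspected, via `tail`.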


def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
    print("Testing get-osdmap and set-osdmap")
    errors = 0
    kill_daemons()
    weight = 1 / math.e  # just some magic number in [0, 1]
    changed = []
    for osd_path in osd_paths:
        if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
            changed.append(osd_path)
        else:
            logging.warning("Failed to change the weights: {0}".format(osd_path))
    # it is an error if none of the stores was changed
    if not changed:
        errors += 1

    for osd_path in changed:
        weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path)
        if not weights:
            errors += 1
            continue
        if any(abs(w - weight) > 1e-5 for w in weights):
            logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
            errors += 1
    return errors


def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
    # incrementals are not used unless we need to build an MOSDMap to update
    # an OSD's peers, so an obvious way to test them is simply to overwrite an
    # epoch with a different copy, and read it back to see if it matches.
    kill_daemons()
    file_e2 = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
                                                                    file=file_e2.name)
    output = check_output(cmd, shell=True)
    epoch = int(re.findall(r'#(\d+)', output)[0])
    # back up the e1 incremental before overwriting it
    epoch -= 1
    file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret:
        return 1
    # overwrite e1 with e2
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
    if ret:
        return 1
    # use --dry-run to set back to e1; the change should not actually be applied
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret:
        return 1
    # read e1 back
    file_e1_read = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
    if ret:
        return 1
    errors = 0
    try:
        if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
            logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
            errors += 1
    finally:
        # revert the change with file_e1_backup
        cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
        ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
        if ret:
            logging.error("Failed to revert the changed inc-osdmap")
            errors += 1

    return errors
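
# The test above therefore: backs up incremental e1, overwrites e1 with e2's
# incremental (--force), verifies that a --dry-run write is not applied,
# reads e1 back expecting it to equal e2, and finally restores the original
# e1 incremental regardless of the outcome.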


def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
    # Test removeall
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    nullfd = open(os.devnull, "w")
    errors = 0
    print("Test removeall")
    kill_daemons()
    for nspace in db.keys():
        for basename in db[nspace].keys():
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue

                    if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
                        cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
                        errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")

                    cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("remove with --force failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    tmpfd = open(TMPFILE, "w")
                    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=tmpfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
                        errors += 1
                    tmpfd.close()
                    lines = get_lines(TMPFILE)
                    if len(lines) != 0:
                        logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
                        errors += 1
    vstart(new=False)
    wait_for_health()
    cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    if ret != 0:
        logging.error("rados rmsnap failed")
        errors += 1
    time.sleep(2)
    wait_for_health()
    return errors


def main(argv):
    if sys.version_info[0] < 3:
        sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
    else:
        stdout = sys.stdout.buffer
    if len(argv) > 1 and argv[1] == "debug":
        nullfd = stdout
    else:
        nullfd = DEVNULL

    call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
    os.chdir(CEPH_DIR)
    os.environ["CEPH_DIR"] = CEPH_DIR
    OSDDIR = "dev"
    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    if len(argv) > 0 and argv[0] == 'large':
        PG_COUNT = 12
        NUM_REP_OBJECTS = 800
        NUM_CLONED_REP_OBJECTS = 100
        NUM_EC_OBJECTS = 12
        NUM_NSPACES = 4
        # Larger data sets for first object per namespace
        DATALINECOUNT = 50000
        # Number of objects to do xattr/omap testing on
        ATTR_OBJS = 10
    else:
        PG_COUNT = 4
        NUM_REP_OBJECTS = 2
        NUM_CLONED_REP_OBJECTS = 2
        NUM_EC_OBJECTS = 2
        NUM_NSPACES = 2
        # Larger data sets for first object per namespace
        DATALINECOUNT = 10
        # Number of objects to do xattr/omap testing on
        ATTR_OBJS = 2
    ERRORS = 0
    pid = os.getpid()
    TESTDIR = "/tmp/test.{pid}".format(pid=pid)
    DATADIR = "/tmp/data.{pid}".format(pid=pid)
    CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
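    # Illustrative expansion (paths vary by environment): with osd="osd0",
    # CFSD_PREFIX.format(osd="osd0") becomes something like
    #     <CEPH_BIN>/ceph-objectstore-tool --data-path dev/osd0
    # and each test appends its own arguments, e.g. "--op list --pgid 1.0".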
    PROFNAME = "testecprofile"

    os.environ['CEPH_CONF'] = CEPH_CONF
    vstart(new=True)
    wait_for_health()

    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    time.sleep(2)
    REPID = get_pool_id(REP_POOL, nullfd)

    print("Created Replicated pool #{repid}".format(repid=REPID))

    cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    ECID = get_pool_id(EC_POOL, nullfd)

    print("Created Erasure coded pool #{ecid}".format(ecid=ECID))

    print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS * NUM_NSPACES)))
    cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
    logging.debug(cmd)
    call(cmd, shell=True)

    db = {}

    objects = range(1, NUM_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        db[nspace] = {}

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

            db[nspace][NAME] = {}

            if i < ATTR_OBJS + 1:
                keys = range(i)
            else:
                keys = range(0)
            db[nspace][NAME]["xattr"] = {}
            for k in keys:
                if k == 0:
                    continue
                mykey = "key{i}-{k}".format(i=i, k=k)
                myval = "val{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.error("setxattr failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["xattr"][mykey] = myval

            # Create omap header in all objects but REPobject1
            if i < ATTR_OBJS + 1 and i != 1:
                myhdr = "hdr{i}".format(i=i)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.critical("setomapheader failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["omapheader"] = myhdr

            db[nspace][NAME]["omap"] = {}
            for k in keys:
                if k == 0:
                    continue
                mykey = "okey{i}-{k}".format(i=i, k=k)
                myval = "oval{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.critical("setomapval failed with {ret}".format(ret=ret))
                db[nspace][NAME]["omap"][mykey] = myval

    # Create some clones
    cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True)

    objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            # First clone
            CLONENAME = DDNAME + "__1"
            DDNAME += "__head"

            cmd = "mv -f " + DDNAME + " " + CLONENAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data after a snapshot for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

    print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS * NUM_NSPACES)))

    objects = range(1, NUM_EC_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = EC_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the erasure coded data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command into the erasure coded pool failed with {ret}".format(ret=ret))
                return 1

            db[nspace][NAME] = {}

            db[nspace][NAME]["xattr"] = {}
            if i < ATTR_OBJS + 1:
                keys = range(i)
            else:
                keys = range(0)
            for k in keys:
                if k == 0:
                    continue
                mykey = "key{i}-{k}".format(i=i, k=k)
                myval = "val{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.error("setxattr failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["xattr"][mykey] = myval

            # Omap isn't supported in EC pools
            db[nspace][NAME]["omap"] = {}

    logging.debug(db)

    kill_daemons()

    if ERRORS:
        logging.critical("Unable to set up test")
        return 1

    ALLREPPGS = get_pgs(OSDDIR, REPID)
    logging.debug(ALLREPPGS)
    ALLECPGS = get_pgs(OSDDIR, ECID)
    logging.debug(ALLECPGS)

    OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
    logging.debug(OBJREPPGS)
    OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
    logging.debug(OBJECPGS)

    ONEPG = ALLREPPGS[0]
    logging.debug(ONEPG)
    osds = get_osds(ONEPG, OSDDIR)
    ONEOSD = osds[0]
    logging.debug(ONEOSD)

    print("Test invalid parameters")
    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # Prep a valid ec export file for import failure tests
    ONEECPG = ALLECPGS[0]
    osds = get_osds(ONEECPG, OSDDIR)
    ONEECOSD = osds[0]
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    # On import can't specify a different shard
    BADPG = ONEECPG.split('s')[0] + "s10"
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=BADPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a different shard, must be")

    os.unlink(OTHERFILE)

    # Prep a valid export file for import failure tests
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    # On import can't specify a PG with a non-existent pool
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")

    # On import can't specify a shard for a replicated export
    cmd = (CFSD_PREFIX + "--op import --pgid {pg}s0 --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Can't specify a sharded pgid with a non-sharded export")

    # On import can't specify a PG with a bad seed
    TMPPG = "{pool}.80".format(pool=REPID)
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")

    os.unlink(OTHERFILE)
    cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
    ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))

    cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # Specify a bad --type
    os.mkdir(OSDDIR + "/fakeosd")
    cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Unable to create store of type foobar")

    # Don't specify a data-path
    cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Must provide --data-path")

    cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")

    cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide pgid")

    # Don't specify an --op nor an object command
    cmd = CFSD_PREFIX.format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op or object command...")

    # Specify a bad --op command
    cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, dump-import, trim-pg-log)")

    # Provide just the object param, not a command
    cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Invalid syntax, missing command")

    # Provide an object name that doesn't exist
    cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")

    # Provide an invalid object command
    cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")

    cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")

    cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")

    cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")

    cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")

    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
    ALLPGS = OBJREPPGS + OBJECPGS
    OSDS = get_osds(ALLPGS[0], OSDDIR)
    osd = OSDS[0]

    print("Test all --op dump-journal")
    ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
    ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)

    # Test --op list and generate json for all objects
    print("Test --op list variants")

    # retrieve all objects from all PGs
    tmpfd = open(TMPFILE, "wb")
    cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]
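
    # Judging from the unpacking above and below: with "--format json" each
    # output line parses to a JSON array of [pgid, coll, object-dict]
    # triples, while the default text format (exercised further down) emits
    # one "[pgid, object-dict]" pair per line.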

    # retrieve all objects in a given PG
    tmpfd = open(OTHERFILE, "ab")
    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(OTHERFILE)
    JSONOBJ = sorted(set(lines))
    (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

    if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
        logging.error("the first line of --op list is different "
                      "from the first line of --op list --pgid {pg}".format(pg=pgid))
        ERRORS += 1

    # retrieve all objects with a given name in a given PG
    tmpfd = open(OTHERFILE, "wb")
    cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(OTHERFILE)
    JSONOBJ = sorted(set(lines))
    (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

    if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
        logging.error("the first line of --op list is different "
                      "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
        ERRORS += 1

    print("Test --op list by generating json for all objects using default format")
    for pg in ALLPGS:
        OSDS = get_osds(pg, OSDDIR)
        for osd in OSDS:
            tmpfd = open(TMPFILE, "ab")
            cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=tmpfd)
            if ret != 0:
                logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
                ERRORS += 1

    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    for JSON in JSONOBJ:
        (pgid, jsondict) = json.loads(JSON)
        # Skip clones for now
        if jsondict['snapid'] != -2:
            continue
        db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
        # print(db[jsondict['namespace']][jsondict['oid']]['json'])
        if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
            logging.error("Malformed JSON {json}".format(json=JSON))
            ERRORS += 1

    # Test get-bytes
    print("Test get-bytes and set-bytes")
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
            JSON = db[nspace][basename]['json']
            GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
            TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
            SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
            BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    try:
                        os.unlink(GETNAME)
                    except:
                        pass
                    cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret}".format(ret=ret))
                        ERRORS += 1
                        continue
                    cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data from get-bytes differ")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, GETNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, file)
                        ERRORS += 1
                    fd = open(SETNAME, "w")
                    data = "put-bytes going into {file}\n".format(file=file)
                    fd.write(data)
                    fd.close()
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
                        ERRORS += 1
                    fd = open(TESTNAME, "wb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=fd)
                    fd.close()
                    if ret != 0:
                        logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
                        ERRORS += 1
                    cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data after set-bytes differ")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, TESTNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, SETNAME)
                        ERRORS += 1

                    # Use set-bytes with --dry-run and make sure contents haven't changed
                    fd = open(BADNAME, "w")
                    data = "Bad data for --dry-run in {file}\n".format(file=file)
                    fd.write(data)
                    fd.close()
                    cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
                        ERRORS += 1
                    fd = open(TESTNAME, "wb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=fd)
                    fd.close()
                    if ret != 0:
                        logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
                        ERRORS += 1
                    cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Data after set-bytes --dry-run changed!")
                        logging.debug("Got:")
                        cat_file(logging.DEBUG, TESTNAME)
                        logging.debug("Expected:")
                        cat_file(logging.DEBUG, SETNAME)
                        ERRORS += 1

                    fd = open(file, "rb")
                    cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdin=fd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
                        ERRORS += 1
                    fd.close()

    try:
        os.unlink(GETNAME)
    except:
        pass
    try:
        os.unlink(TESTNAME)
    except:
        pass
    try:
        os.unlink(SETNAME)
    except:
        pass
    try:
        os.unlink(BADNAME)
    except:
        pass

    # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
    print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
    for nspace in db.keys():
        for basename in db[nspace].keys():
            file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue
                    for key, val in db[nspace][basename]["xattr"].items():
                        attrkey = "_" + key
                        cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if getval != val:
                            logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
                            ERRORS += 1
                            continue
                        # set-attr to bogus value "foobar"
                        cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Test set-attr with dry-run
                        cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check the set-attr
                        cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        if getval != "foobar":
                            logging.error("Check of set-attr failed because we got {val}".format(val=getval))
                            ERRORS += 1
                            continue
                        # Test rm-attr
                        cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check rm-attr with dry-run
                        cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
                            ERRORS += 1
                            continue
                        cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
                        if ret == 0:
                            logging.error("For rm-attr expect get-attr to fail, but it succeeded")
                            ERRORS += 1
                        # Put back value
                        cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
                            ERRORS += 1
                            continue

                    hdr = db[nspace][basename].get("omapheader", "")
                    cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    gethdr = check_output(cmd, shell=True)
                    if gethdr != hdr:
                        logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                        ERRORS += 1
                        continue
                    # set-omaphdr to bogus value "foobar"
                    cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
                        ERRORS += 1
                        continue
                    # Check the set-omaphdr
                    cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    gethdr = check_output(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
                        ERRORS += 1
                        continue
                    if gethdr != "foobar":
                        logging.error("Check of set-omaphdr failed because we got {val}".format(val=gethdr))
                        ERRORS += 1
                        continue
                    # Test dry-run with set-omaphdr
                    cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
                        ERRORS += 1
                        continue
                    # Put back value
                    cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
                        ERRORS += 1
                        continue

                    for omapkey, val in db[nspace][basename]["omap"].items():
                        cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if getval != val:
                            logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
                            ERRORS += 1
                            continue
                        # set-omap to bogus value "foobar"
                        cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check set-omap with dry-run
                        cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        # Check the set-omap
                        cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        getval = check_output(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
                            ERRORS += 1
                            continue
                        if getval != "foobar":
                            logging.error("Check of set-omap failed because we got {val}".format(val=getval))
                            ERRORS += 1
                            continue
                        # Test rm-omap
                        cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
                            ERRORS += 1
                        # Check rm-omap with dry-run
                        cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stdout=nullfd)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
                            ERRORS += 1
                        cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
                        if ret == 0:
                            logging.error("For rm-omap expect get-omap to fail, but it succeeded")
                            ERRORS += 1
                        # Put back value
                        cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
                        logging.debug(cmd)
                        ret = call(cmd, shell=True)
                        if ret != 0:
                            logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
                            ERRORS += 1
                            continue
1466
1467 # Test dump
1468 print("Test dump")
1469 for nspace in db.keys():
1470 for basename in db[nspace].keys():
1471 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1472 JSON = db[nspace][basename]['json']
1473 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1474 for pg in OBJREPPGS:
1475 OSDS = get_osds(pg, OSDDIR)
1476 for osd in OSDS:
1477 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1478 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1479 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1480 if not fnames:
1481 continue
1482 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1483 continue
1484 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1485 logging.debug(cmd)
1486 ret = call(cmd, shell=True)
1487 if ret != 0:
1488 logging.error("Invalid dump for {json}".format(json=JSON))
1489 ERRORS += 1
1490
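# list-attrs/get-attr test: for EC objects the hinfo_key xattr must be
# present on every shard, and for all objects each user xattr (stored
# with a "_" prefix on disk) must round-trip against the values
# recorded in db.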
1491 print("Test list-attrs get-attr")
1492 ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
1493 VALFILE = r"/tmp/val.{pid}".format(pid=pid)
1494 for nspace in db.keys():
1495 for basename in db[nspace].keys():
1496 file = os.path.join(DATADIR, nspace + "-" + basename)
1497 JSON = db[nspace][basename]['json']
1498 jsondict = json.loads(JSON)
1499
1500 if 'shard_id' in jsondict:
1501 logging.debug("ECobject " + JSON)
1502 found = 0
1503 for pg in OBJECPGS:
1504 OSDS = get_osds(pg, OSDDIR)
1505 # Fix shard_id since we only have one json instance for each object
1506 jsondict['shard_id'] = int(pg.split('s')[1])
1507 JSON = json.dumps(jsondict)
1508 for osd in OSDS:
1509 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr hinfo_key").format(osd=osd, pg=pg, json=JSON)
1510 logging.debug("TRY: " + cmd)
1511 try:
1512 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1513 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1514 found += 1
1515 except subprocess.CalledProcessError as e:
1516 if "No such file or directory" not in e.output and "No data available" not in e.output:
1517 raise
1518 # Assuming k=2 m=1 for the default ec pool
1519 if found != 3:
1520 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1521 ERRORS += 1
1522
1523 for pg in ALLPGS:
1524 # Make sure rep obj with rep pg or ec obj with ec pg
1525 if ('shard_id' in jsondict) != (pg.find('s') > 0):
1526 continue
1527 if 'shard_id' in jsondict:
1528 # Fix shard_id since we only have one json instance for each object
1529 jsondict['shard_id'] = int(pg.split('s')[1])
1530 JSON = json.dumps(jsondict)
1531 OSDS = get_osds(pg, OSDDIR)
1532 for osd in OSDS:
DIR = os.path.join(OSDDIR, osd, "current", "{pg}_head".format(pg=pg))
1534 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1535 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1536 if not fnames:
1537 continue
1538 afd = open(ATTRFILE, "wb")
1539 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' list-attrs").format(osd=osd, pg=pg, json=JSON)
1540 logging.debug(cmd)
1541 ret = call(cmd, shell=True, stdout=afd)
1542 afd.close()
1543 if ret != 0:
1544 logging.error("list-attrs failed with {ret}".format(ret=ret))
1545 ERRORS += 1
1546 continue
1547 keys = get_lines(ATTRFILE)
1548 values = dict(db[nspace][basename]["xattr"])
1549 for key in keys:
1550 if key == "_" or key == "snapset" or key == "hinfo_key":
1551 continue
1552 key = key.strip("_")
1553 if key not in values:
1554 logging.error("Unexpected key {key} present".format(key=key))
1555 ERRORS += 1
1556 continue
1557 exp = values.pop(key)
1558 vfd = open(VALFILE, "wb")
1559 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key="_" + key)
1560 logging.debug(cmd)
1561 ret = call(cmd, shell=True, stdout=vfd)
1562 vfd.close()
1563 if ret != 0:
1564 logging.error("get-attr failed with {ret}".format(ret=ret))
1565 ERRORS += 1
1566 continue
lines = get_lines(VALFILE)
val = lines[0] if lines else ""
1569 if exp != val:
1570 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1571 ERRORS += 1
if len(values) != 0:
    logging.error("Not all keys found, remaining keys: {vals}".format(vals=values))
    ERRORS += 1
1575
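# --op meta-list enumerates objects in the meta collection; every entry
# should report the pseudo-pgid "meta" and an empty namespace.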
1576 print("Test --op meta-list")
1577 tmpfd = open(TMPFILE, "wb")
1578 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1579 logging.debug(cmd)
1580 ret = call(cmd, shell=True, stdout=tmpfd)
1581 if ret != 0:
1582 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1583 ERRORS += 1
1584
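# get-bytes should succeed for every object reported by meta-list; the
# fetched contents are discarded, only the exit status is checked.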
1585 print("Test get-bytes on meta")
1586 tmpfd.close()
1587 lines = get_lines(TMPFILE)
1588 JSONOBJ = sorted(set(lines))
1589 for JSON in JSONOBJ:
1590 (pgid, jsondict) = json.loads(JSON)
1591 if pgid != "meta":
1592 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1593 ERRORS += 1
if jsondict['namespace'] != "":
    logging.error("Non-empty namespace from --op meta-list: {ns}".format(ns=jsondict['namespace']))
1596 ERRORS += 1
1597 logging.info(JSON)
try:
    os.unlink(GETNAME)
except OSError:
    pass
1602 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1603 logging.debug(cmd)
1604 ret = call(cmd, shell=True)
1605 if ret != 0:
1606 logging.error("Bad exit status {ret}".format(ret=ret))
1607 ERRORS += 1
1608
try:
    os.unlink(GETNAME)
except OSError:
    pass
try:
    os.unlink(TESTNAME)
except OSError:
    pass
1617
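# --op info should emit JSON whose "pgid" field matches the pg queried,
# on every OSD that holds a copy of that pg.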
1618 print("Test pg info")
1619 for pg in ALLREPPGS + ALLECPGS:
1620 for osd in get_osds(pg, OSDDIR):
1621 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1622 logging.debug(cmd)
1623 ret = call(cmd, shell=True, stdout=nullfd)
1624 if ret != 0:
1625 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1626 ERRORS += 1
1627
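# --op log: a pg that holds objects should have "modify" entries in its
# log, and a pg without objects should not.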
1628 print("Test pg logging")
1629 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
logging.warning("All PGs have objects, so the log-without-modify-entries case is not exercised")
1631 for pg in ALLREPPGS + ALLECPGS:
1632 for osd in get_osds(pg, OSDDIR):
1633 tmpfd = open(TMPFILE, "wb")
1634 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1635 logging.debug(cmd)
1636 ret = call(cmd, shell=True, stdout=tmpfd)
1637 if ret != 0:
1638 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1639 ERRORS += 1
1640 HASOBJ = pg in OBJREPPGS + OBJECPGS
1641 MODOBJ = False
1642 for line in get_lines(TMPFILE):
1643 if line.find("modify") != -1:
1644 MODOBJ = True
1645 break
1646 if HASOBJ != MODOBJ:
1647 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
MSG = "" if HASOBJ else "NOT "
print("Log should {msg}have a modify entry".format(msg=MSG))
1650 ERRORS += 1
1651
try:
    os.unlink(TMPFILE)
except OSError:
    pass
1656
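# list-pgs should report exactly the pgs found by scanning the OSD's
# current/ directory for *_head collections.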
1657 print("Test list-pgs")
1658 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1659
1660 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1661 CHECK_PGS = sorted(CHECK_PGS)
1662
1663 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1664 logging.debug(cmd)
1665 TEST_PGS = check_output(cmd, shell=True).split("\n")
TEST_PGS = sorted(pg for pg in TEST_PGS if pg)  # Drop the blank entry from the trailing newline
1667
1668 if TEST_PGS != CHECK_PGS:
1669 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1670 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1671 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1672 ERRORS += 1
1673
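# Export tests: --dry-run must neither create the --file target nor
# write any export data to stdout.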
1674 EXP_ERRORS = 0
1675 print("Test pg export --dry-run")
1676 pg = ALLREPPGS[0]
1677 osd = get_osds(pg, OSDDIR)[0]
1678 fname = "/tmp/fname.{pid}".format(pid=pid)
1679 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1680 logging.debug(cmd)
1681 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1682 if ret != 0:
1683 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1684 EXP_ERRORS += 1
1685 elif os.path.exists(fname):
1686 logging.error("Exporting --dry-run created file")
1687 EXP_ERRORS += 1
1688
1689 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1690 logging.debug(cmd)
1691 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1692 if ret != 0:
1693 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1694 EXP_ERRORS += 1
1695 else:
1696 outdata = get_lines(fname)
1697 if len(outdata) > 0:
1698 logging.error("Exporting --dry-run to stdout not empty")
1699 logging.error("Data: " + outdata)
1700 EXP_ERRORS += 1
1701
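# Export every pg from every OSD that holds it, cycling through the
# three output styles: stdout redirection, '--file -', and --file PATH.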
1702 os.mkdir(TESTDIR)
1703 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1704 os.mkdir(os.path.join(TESTDIR, osd))
1705 print("Test pg export")
1706 for pg in ALLREPPGS + ALLECPGS:
1707 for osd in get_osds(pg, OSDDIR):
1708 mydir = os.path.join(TESTDIR, osd)
1709 fname = os.path.join(mydir, pg)
1710 if pg == ALLREPPGS[0]:
1711 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1712 elif pg == ALLREPPGS[1]:
1713 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1714 else:
1715 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1716 logging.debug(cmd)
1717 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1718 if ret != 0:
1719 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1720 EXP_ERRORS += 1
1721
1722 ERRORS += EXP_ERRORS
1723
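# Removal tests: --dry-run should leave the pg in place, then
# '--force --op remove' deletes it for real so the import tests below
# can restore it.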
1724 print("Test pg removal")
1725 RM_ERRORS = 0
1726 for pg in ALLREPPGS + ALLECPGS:
1727 for osd in get_osds(pg, OSDDIR):
1728 # This should do nothing
1729 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1730 logging.debug(cmd)
1731 ret = call(cmd, shell=True, stdout=nullfd)
1732 if ret != 0:
1733 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1734 RM_ERRORS += 1
1735 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1736 logging.debug(cmd)
1737 ret = call(cmd, shell=True, stdout=nullfd)
1738 if ret != 0:
1739 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1740 RM_ERRORS += 1
1741
1742 ERRORS += RM_ERRORS
1743
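# Import tests only make sense if export and removal both succeeded;
# dump-import runs first as a sanity check that each file parses, and
# the import itself again cycles through a stdin pipe, '--file -' with
# redirection, and --file PATH.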
1744 IMP_ERRORS = 0
1745 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1746 print("Test pg import")
1747 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1748 dir = os.path.join(TESTDIR, osd)
1749 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1750 for pg in PGS:
1751 file = os.path.join(dir, pg)
1752 # Make sure this doesn't crash
1753 cmd = (CFSD_PREFIX + "--op dump-import --file {file}").format(osd=osd, file=file)
1754 logging.debug(cmd)
1755 ret = call(cmd, shell=True, stdout=nullfd)
1756 if ret != 0:
1757 logging.error("Dump-import failed from {file} with {ret}".format(file=file, ret=ret))
1758 IMP_ERRORS += 1
1759 # This should do nothing
1760 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1761 logging.debug(cmd)
1762 ret = call(cmd, shell=True, stdout=nullfd)
1763 if ret != 0:
1764 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1765 IMP_ERRORS += 1
1766 if pg == PGS[0]:
1767 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1768 elif pg == PGS[1]:
1769 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1770 else:
1771 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1772 logging.debug(cmd)
1773 ret = call(cmd, shell=True, stdout=nullfd)
1774 if ret != 0:
1775 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1776 IMP_ERRORS += 1
1777 else:
1778 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1779
1780 ERRORS += IMP_ERRORS
1782
1783 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1784 print("Verify replicated import data")
1785 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1786 ERRORS += data_errors
1787 else:
1788 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1789
1790 print("Test all --op dump-journal again")
1791 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1792 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1793
1794 vstart(new=False)
1795 wait_for_health()
1796
1797 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1798 print("Verify erasure coded import data")
1799 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1800 # Check replicated data/xattr/omap using rados
1801 print("Verify replicated import data using rados")
1802 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1803
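# rados import requires clean exports; replay the replicated pg exports
# into a fresh pool and verify that --dry-run, a real import, and
# --no-overwrite all behave as expected.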
1804 if EXP_ERRORS == 0:
1805 NEWPOOL = "rados-import-pool"
1806 cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
1807 logging.debug(cmd)
1808 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1809
1810 print("Test rados import")
1811 first = True
1812 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1813 dir = os.path.join(TESTDIR, osd)
1814 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1815 if pg.find("{id}.".format(id=REPID)) != 0:
1816 continue
1817 file = os.path.join(dir, pg)
1818 if first:
1819 first = False
1820 # This should do nothing
1821 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1822 logging.debug(cmd)
1823 ret = call(cmd, shell=True, stdout=nullfd)
1824 if ret != 0:
1825 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1826 ERRORS += 1
1827 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1828 logging.debug(cmd)
1829 data = check_output(cmd, shell=True)
1830 if data:
1831 logging.error("'{data}'".format(data=data))
1832 logging.error("Found objects after dry-run")
1833 ERRORS += 1
1834 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1835 logging.debug(cmd)
1836 ret = call(cmd, shell=True, stdout=nullfd)
1837 if ret != 0:
1838 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1839 ERRORS += 1
1840 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1841 logging.debug(cmd)
1842 ret = call(cmd, shell=True, stdout=nullfd)
1843 if ret != 0:
1844 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1845 ERRORS += 1
1846
1847 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1848 else:
1849 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1850
1851 # Clear directories of previous portion
1852 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1853 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1854 os.mkdir(TESTDIR)
1855 os.mkdir(DATADIR)
1856
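# Split test mechanics: create a 1-pg pool, export that pg, raise
# pg_num to split it into two children, remove both children, then
# import the single pre-split export into each child with --pgid so the
# tool filters out objects and log entries belonging to the other pg.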
1857 # Cause SPLIT_POOL to split and test import with object/log filtering
1858 print("Testing import all objects after a split")
1859 SPLIT_POOL = "split_pool"
1860 PG_COUNT = 1
1861 SPLIT_OBJ_COUNT = 5
1862 SPLIT_NSPACE_COUNT = 2
1863 SPLIT_NAME = "split"
1864 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1865 logging.debug(cmd)
1866 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1867 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1868 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1869 EXP_ERRORS = 0
1870 RM_ERRORS = 0
1871 IMP_ERRORS = 0
1872
1873 objects = range(1, SPLIT_OBJ_COUNT + 1)
1874 nspaces = range(SPLIT_NSPACE_COUNT)
1875 for n in nspaces:
1876 nspace = get_nspace(n)
1877
1878 for i in objects:
1879 NAME = SPLIT_NAME + "{num}".format(num=i)
1880 LNAME = nspace + "-" + NAME
1881 DDNAME = os.path.join(DATADIR, LNAME)
1882 DDNAME += "__head"
1883
1884 cmd = "rm -f " + DDNAME
1885 logging.debug(cmd)
1886 call(cmd, shell=True)
1887
1888 if i == 1:
1889 dataline = range(DATALINECOUNT)
1890 else:
1891 dataline = range(1)
1892 fd = open(DDNAME, "w")
1893 data = "This is the split data for " + LNAME + "\n"
1894 for _ in dataline:
1895 fd.write(data)
1896 fd.close()
1897
1898 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1899 logging.debug(cmd)
1900 ret = call(cmd, shell=True, stderr=nullfd)
1901 if ret != 0:
1902 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1903 return 1
1904
1905 wait_for_health()
1906 kill_daemons()
1907
1908 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1909 os.mkdir(os.path.join(TESTDIR, osd))
1910
1911 pg = "{pool}.0".format(pool=SPLITID)
1912 EXPORT_PG = pg
1913
1914 export_osds = get_osds(pg, OSDDIR)
1915 for osd in export_osds:
1916 mydir = os.path.join(TESTDIR, osd)
1917 fname = os.path.join(mydir, pg)
1918 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1919 logging.debug(cmd)
1920 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1921 if ret != 0:
1922 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1923 EXP_ERRORS += 1
1924
1925 ERRORS += EXP_ERRORS
1926
1927 if EXP_ERRORS == 0:
1928 vstart(new=False)
1929 wait_for_health()
1930
1931 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1932 logging.debug(cmd)
1933 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1934 time.sleep(5)
1935 wait_for_health()
1936
1937 kill_daemons()
1938
1939 # Now 2 PGs, poolid.0 and poolid.1
1940 for seed in range(2):
1941 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1942
1943 which = 0
1944 for osd in get_osds(pg, OSDDIR):
cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=nullfd)
if ret != 0:
    logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
    IMP_ERRORS += 1
1948
1949 # This is weird. The export files are based on only the EXPORT_PG
1950 # and where that pg was before the split. Use 'which' to use all
1951 # export copies in import.
1952 mydir = os.path.join(TESTDIR, export_osds[which])
1953 fname = os.path.join(mydir, EXPORT_PG)
1954 which += 1
1955 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1956 logging.debug(cmd)
1957 ret = call(cmd, shell=True, stdout=nullfd)
1958 if ret != 0:
logging.error("Import failed from {file} with {ret}".format(file=fname, ret=ret))
1960 IMP_ERRORS += 1
1961
1962 ERRORS += IMP_ERRORS
1963
1964 # Start up again to make sure imports didn't corrupt anything
1965 if IMP_ERRORS == 0:
1966 print("Verify split import data")
1967 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
1968 ERRORS += data_errors
1969 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
1970 logging.error("Incorrect number of replicas seen {count}".format(count=count))
1971 ERRORS += 1
1972 vstart(new=False)
1973 wait_for_health()
1974
1975 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1976 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1977
1978 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
1979
1980 # vstart() starts 4 OSDs
1981 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
1982 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
1983
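# Any core.* files left in CEPH_DIR indicate a daemon or tool crash;
# preserve them under /tmp and count each one as an error.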
1984 kill_daemons()
1985 CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
1986 if CORES:
1987 CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
1988 os.mkdir(CORE_DIR)
1989 call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
1990 logging.error("Failure due to cores found")
1991 logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
1992 ERRORS += len(CORES)
1993
1994 if ERRORS == 0:
1995 print("TEST PASSED")
1996 return 0
1997 else:
1998 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
1999 return 1
2000
2001
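# CEPH_DIR may contain btrfs subvolumes (presumably created by the OSDs
# when the backing filesystem is btrfs); plain 'rm -fr' cannot remove
# those, so delete them explicitly before the final cleanup.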
2002 def remove_btrfs_subvolumes(path):
2003 if platform.system() == "FreeBSD":
2004 return
2005 result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE)
2006 for line in result.stdout:
2007 filesystem = decode(line).rstrip('\n')
2008 if filesystem == "btrfs":
2009 result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE)
2010 for line in result.stdout:
2011 subvolume = decode(line).split()[8]
2012 # extracting the relative volume name
2013 m = re.search(".*(%s.*)" % path, subvolume)
2014 if m:
2015 found = m.group(1)
2016 call("sudo btrfs subvolume delete %s" % found, shell=True)
2017
2018
2019 if __name__ == "__main__":
2020 status = 1
2021 try:
2022 status = main(sys.argv[1:])
2023 finally:
2024 kill_daemons()
2025 os.chdir(CEPH_BUILD_DIR)
2026 remove_btrfs_subvolumes(CEPH_DIR)
2027 call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
2028 sys.exit(status)