#!/usr/bin/python3

from __future__ import print_function
from subprocess import call
try:
    from subprocess import check_output
except ImportError:
    def check_output(*popenargs, **kwargs):
        import subprocess
        # backported from python 2.7 stdlib
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output

import filecmp
import os
import subprocess
import math
import time
import sys
import re
import logging
import json
import tempfile
import platform

try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING,
                    datefmt="%FT%T")


if sys.version_info[0] >= 3:
    def decode(s):
        return s.decode('utf-8')

    def check_output(*args, **kwargs):  # noqa
        return decode(subprocess.check_output(*args, **kwargs))
else:
    def decode(s):
        return s


def wait_for_health():
    print("Wait for health_ok...", end="")
    tries = 0
    while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
        tries += 1
        if tries == 150:
            raise Exception("Time exceeded to go to health")
        time.sleep(1)
    print("DONE")

def get_pool_id(name, nullfd):
    cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
    # the first line reads "pool {pool} id #" ... grab the 4th field, the pool id
    return check_output(cmd, stderr=nullfd).split()[3]

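# A minimal sketch of the parsing in get_pool_id(), assuming the first
# line of "ceph osd pool stats <pool>" still reads "pool <name> id <num>"
# (the exact field layout is an assumption, not a stable interface):
#
#   >>> "pool rep_pool id 1".split()[3]
#   '1'
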
# return a list of unique PGs given an osd subdirectory
def get_osd_pgs(SUBDIR, ID):
    PGS = []
    if ID:
        endhead = re.compile("{id}.*_head$".format(id=ID))
    DIR = os.path.join(SUBDIR, "current")
    PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))]
    PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p]
    return PGS


# return a sorted list of unique PGs given a directory
def get_pgs(DIR, ID):
    OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    PGS = []
    for d in OSDS:
        SUBDIR = os.path.join(DIR, d)
        PGS += get_osd_pgs(SUBDIR, ID)
    return sorted(set(PGS))


# return a sorted list of PGs (a subset of ALLPGS) that contain objects with the given prefix
def get_objs(ALLPGS, prefix, DIR, ID):
    OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    PGS = []
    for d in OSDS:
        DIRL2 = os.path.join(DIR, d)
        SUBDIR = os.path.join(DIRL2, "current")
        for p in ALLPGS:
            PGDIR = p + "_head"
            if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
                continue
            FINALDIR = os.path.join(SUBDIR, PGDIR)
            # See if there are any objects there
            if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)):
                PGS += [p]
    return sorted(set(PGS))


# return a sorted list of OSDs which have data from a given PG
def get_osds(PG, DIR):
    ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
    OSDS = []
    for d in ALLOSDS:
        DIRL2 = os.path.join(DIR, d)
        SUBDIR = os.path.join(DIRL2, "current")
        PGDIR = PG + "_head"
        if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
            continue
        OSDS += [d]
    return sorted(OSDS)


def get_lines(filename):
    tmpfd = open(filename, "r")
    line = True
    lines = []
    while line:
        line = tmpfd.readline().rstrip('\n')
        if line:
            lines += [line]
    tmpfd.close()
    os.unlink(filename)
    return lines


def cat_file(level, filename):
    if level < logging.getLogger().getEffectiveLevel():
        return
    print("File: " + filename)
    with open(filename, "r") as f:
        while True:
            line = f.readline().rstrip('\n')
            if not line:
                break
            print(line)
    print("<EOF>")


def vstart(new, opt="-o osd_pool_default_pg_autoscale_mode=off"):
    print("vstarting....", end="")
    NEW = "-n" if new else "-k"
    call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH={path}/src/pybind/mgr {path}/src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
    print("DONE")

def test_failure(cmd, errmsg, tty=False):
    if tty:
        try:
            # read/write binary; the original "rwb" is not a valid open()
            # mode and made the tty tests skip unconditionally
            ttyfd = open("/dev/tty", "r+b")
        except Exception as e:
            logging.info(str(e))
            logging.info("SKIP " + cmd)
            return 0
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    tmpfd = open(TMPFILE, "wb")

    logging.debug(cmd)
    if tty:
        ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
        ttyfd.close()
    else:
        ret = call(cmd, shell=True, stderr=tmpfd)
    tmpfd.close()
    if ret == 0:
        logging.error(cmd)
        logging.error("Should have failed, but got exit 0")
        return 1
    lines = get_lines(TMPFILE)
    matched = [l for l in lines if errmsg in l]
    if any(matched):
        logging.info("Correctly failed with message \"" + matched[0] + "\"")
        return 0
    else:
        logging.error("Command: " + cmd)
        logging.error("Bad messages to stderr \"" + str(lines) + "\"")
        logging.error("Expected \"" + errmsg + "\"")
        return 1

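# Typical usage (mirrors the invalid-parameter checks in main()):
#   ERRORS += test_failure(cmd, "Must provide pgid")
# With tty=True the command is wired to /dev/tty so the tool's isatty()
# guards trigger; the check is skipped when no controlling tty is available.
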

def get_nspace(num):
    if num == 0:
        return ""
    return "ns{num}".format(num=num)


def verify(DATADIR, POOL, NAME_PREFIX, db):
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1]
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        try:
            os.unlink(TMPFILE)
        except OSError:
            pass
        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
        logging.debug(cmd)
        call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
        cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("{file} data not imported properly".format(file=file))
            ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except OSError:
            pass
        for key, val in db[nspace][file]["xattr"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            getval = check_output(cmd, shell=True, stderr=DEVNULL)
            logging.debug("getxattr {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
                ERRORS += 1
                continue
        hdr = db[nspace][file].get("omapheader", "")
        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stderr=DEVNULL)
        if ret != 0:
            logging.error("rados getomapheader returned {ret}".format(ret=ret))
            ERRORS += 1
        else:
            getlines = get_lines(TMPFILE)
            assert(len(getlines) == 0 or len(getlines) == 1)
            if len(getlines) == 0:
                gethdr = ""
            else:
                gethdr = getlines[0]
            logging.debug("header: {hdr}".format(hdr=gethdr))
            if gethdr != hdr:
                logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                ERRORS += 1
        for key, val in db[nspace][file]["omap"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=DEVNULL)
            if ret != 0:
                logging.error("getomapval returned {ret}".format(ret=ret))
                ERRORS += 1
                continue
            getlines = get_lines(TMPFILE)
            if len(getlines) != 1:
                logging.error("Bad data from getomapval {lines}".format(lines=getlines))
                ERRORS += 1
                continue
            getval = getlines[0]
            logging.debug("getomapval {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
                ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except OSError:
            pass
    return ERRORS


def check_journal(jsondict):
    errors = 0
    if 'header' not in jsondict:
        logging.error("Key 'header' not in dump-journal")
        errors += 1
    elif 'max_size' not in jsondict['header']:
        logging.error("Key 'max_size' not in dump-journal header")
        errors += 1
    else:
        print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))
    if 'entries' not in jsondict:
        logging.error("Key 'entries' not in dump-journal output")
        errors += 1
    elif len(jsondict['entries']) == 0:
        logging.info("No entries in journal found")
    else:
        errors += check_journal_entries(jsondict['entries'])
    return errors

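# Shape of the dump-journal JSON that check_journal() and the helpers
# below walk; this sketch is assembled from the assertions themselves,
# so the field set shown is an assumption rather than a schema:
#
#   {"header": {"max_size": ...},
#    "entries": [{"offset": ..., "seq": ...,
#                 "transactions": [{"trans_num": 0,
#                                   "ops": [{"op_num": 0,
#                                            "op_name": "...", ...},
#                                           ...]},
#                                  ...]},
#                ...]}
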

def check_journal_entries(entries):
    errors = 0
    for enum in range(len(entries)):
        if 'offset' not in entries[enum]:
            logging.error("No 'offset' key in entry {e}".format(e=enum))
            errors += 1
        if 'seq' not in entries[enum]:
            logging.error("No 'seq' key in entry {e}".format(e=enum))
            errors += 1
        if 'transactions' not in entries[enum]:
            logging.error("No 'transactions' key in entry {e}".format(e=enum))
            errors += 1
        elif len(entries[enum]['transactions']) == 0:
            logging.error("No transactions found in entry {e}".format(e=enum))
            errors += 1
        else:
            errors += check_entry_transactions(entries[enum], enum)
    return errors


def check_entry_transactions(entry, enum):
    errors = 0
    for tnum in range(len(entry['transactions'])):
        if 'trans_num' not in entry['transactions'][tnum]:
            logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        elif entry['transactions'][tnum]['trans_num'] != tnum:
            ft = entry['transactions'][tnum]['trans_num']
            logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
            errors += 1
        if 'ops' not in entry['transactions'][tnum]:
            logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        else:
            errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum)
    return errors


def check_transaction_ops(ops, enum, tnum):
    if len(ops) == 0:
        logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
    errors = 0
    for onum in range(len(ops)):
        if 'op_num' not in ops[onum]:
            logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
        elif ops[onum]['op_num'] != onum:
            fo = ops[onum]['op_num']
            logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
            errors += 1
        if 'op_name' not in ops[onum]:
            logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
    return errors


def test_dump_journal(CFSD_PREFIX, osds):
    ERRORS = 0
    pid = os.getpid()
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)

    for osd in osds:
        # Test --op dump-journal by loading json
        cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
        logging.debug(cmd)
        tmpfd = open(TMPFILE, "wb")
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret != 0:
            logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
            ERRORS += 1
            continue
        tmpfd.close()
        tmpfd = open(TMPFILE, "r")
        jsondict = json.load(tmpfd)
        tmpfd.close()
        os.unlink(TMPFILE)

        journal_errors = check_journal(jsondict)
        if journal_errors != 0:
            logging.error(jsondict)
        ERRORS += journal_errors

    return ERRORS

CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
CEPH_BIN = os.environ.get('CEPH_BIN')
CEPH_ROOT = os.environ.get('CEPH_ROOT')

if not CEPH_BUILD_DIR:
    CEPH_BUILD_DIR = os.getcwd()
    os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
    CEPH_BIN = os.path.join(CEPH_BUILD_DIR, 'bin')
    os.putenv('CEPH_BIN', CEPH_BIN)
    CEPH_ROOT = os.path.dirname(CEPH_BUILD_DIR)
    os.putenv('CEPH_ROOT', CEPH_ROOT)
    CEPH_LIB = os.path.join(CEPH_BUILD_DIR, 'lib')
    os.putenv('CEPH_LIB', CEPH_LIB)

try:
    os.mkdir("td")
except OSError:
    pass  # ok if this is already there

CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')

def kill_daemons():
    call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True)


def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
    repcount = 0
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1] + "__" + clone
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        tmpfd = open(TMPFILE, "wb")
        cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret:
            logging.critical("INTERNAL ERROR")
            return 1
        tmpfd.close()
        obj_locs = get_lines(TMPFILE)
        if len(obj_locs) == 0:
            logging.error("Can't find imported object {name}".format(name=file))
            ERRORS += 1
        for obj_loc in obj_locs:
            # For btrfs skip snap_* dirs
            if re.search("/snap_[0-9]*/", obj_loc) is not None:
                continue
            repcount += 1
            cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
            logging.debug(cmd)
            ret = call(cmd, shell=True)
            if ret != 0:
                logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
                ERRORS += 1
    return ERRORS, repcount


def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
    # rewrite the newest osdmap of the given osd so that every osd in
    # osd_ids gets the given crush weight
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                        osdmap_file=osdmap_file.name)
    output = check_output(cmd, shell=True)
    epoch = int(re.findall(r'#(\d+)', output)[0])

    new_crush_file = tempfile.NamedTemporaryFile(delete=True)
    old_crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                    crush_file=old_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    for osd_id in osd_ids:
        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                               crush_file=old_crush_file.name,
                                                                                                               weight=weight,
                                                                                                               new_crush_file=new_crush_file.name, path=CEPH_BIN)
        ret = call(cmd, stdout=DEVNULL, shell=True)
        assert(ret == 0)
        old_crush_file, new_crush_file = new_crush_file, old_crush_file

    # swap them back, since we don't need to prepare for another round
    old_crush_file, new_crush_file = new_crush_file, old_crush_file
    old_crush_file.close()

    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                    crush_file=new_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Minimum test of --dry-run by using it, but not checking anything
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    assert(ret == 0)

    # osdmaptool increases the epoch of the changed osdmap, so we need to force
    # the tool to use a different epoch than the one in the osdmap
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)

    return ret == 0

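# The round trip above as a shell outline (paths and the epoch handling
# are illustrative, not verbatim tool output):
#   ceph-objectstore-tool --no-mon-config --data-path dev/osd0 \
#       --op get-osdmap --file /tmp/osdmap
#   osdmaptool --export-crush /tmp/crush /tmp/osdmap
#   crushtool -i /tmp/crush --reweight-item osd.0 0.5 -o /tmp/crush.new
#   osdmaptool --import-crush /tmp/crush.new /tmp/osdmap
#   ceph-objectstore-tool --no-mon-config --data-path dev/osd0 \
#       --op set-osdmap --file /tmp/osdmap --epoch <epoch> --force
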
def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                        osdmap_file=osdmap_file.name)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    if ret != 0:
        return None
    # We read the weights from the crush map. We could query them with
    # osdmaptool as well, but keep in mind they are different things:
    # item weights in the crush map versus the weight associated with
    # each osd in the osdmap.
    crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                    crush_file=crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               shell=True)
    assert(ret == 0)
    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
                                                                                               num_osd=len(osd_ids), path=CEPH_BIN),
                          stderr=DEVNULL,
                          shell=True)
    weights = []
    for line in output.strip().split('\n'):
        print(line)
        linev = re.split(r'\s+', line)
        if linev[0] == '':
            linev.pop(0)
        print('linev %s' % linev)
        weights.append(float(linev[2]))

    return weights

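# Each parsed line is assumed to follow the "ID CLASS WEIGHT TYPE NAME"
# layout of crushtool --tree output, e.g. (illustrative, not captured):
#   " 0   hdd  1.00000   osd.0"  ->  ['0', 'hdd', '1.00000', 'osd.0']
# so float(linev[2]) picks up the crush weight; this is an assumption
# about an output format that is not a stable interface.
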

def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
    print("Testing get-osdmap and set-osdmap")
    errors = 0
    kill_daemons()
    weight = 1 / math.e  # just some magic number in [0, 1]
    changed = []
    for osd_path in osd_paths:
        if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
            changed.append(osd_path)
        else:
            logging.warning("Failed to change the weights: {0}".format(osd_path))
    # it is an error if none of the stores was changed
    if not changed:
        errors += 1

    for osd_path in changed:
        weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path)
        if not weights:
            errors += 1
            continue
        if any(abs(w - weight) > 1e-5 for w in weights):
            logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
            errors += 1
    return errors


def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
    # incrementals are not used unless we need to build an MOSDMap to update
    # an OSD's peers, so an obvious way to test them is simply to overwrite an
    # epoch with a different copy, and read it back to see if it matches.
    kill_daemons()
    file_e2 = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
                                                                     file=file_e2.name)
    output = check_output(cmd, shell=True)
    epoch = int(re.findall(r'#(\d+)', output)[0])
    # backup e1 incremental before overwriting it
    epoch -= 1
    file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret:
        return 1
    # overwrite e1 with e2
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
    if ret:
        return 1
    # Use dry-run to set back to e1; this must not take effect
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret:
        return 1
    # read from e1
    file_e1_read = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
    if ret:
        return 1
    errors = 0
    try:
        if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
            logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
            errors += 1
    finally:
        # revert the change with file_e1_backup
        cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
        ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
        if ret:
            logging.error("Failed to revert the changed inc-osdmap")
            errors += 1

    return errors


def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
    # Test removeall
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    nullfd = open(os.devnull, "w")
    errors = 0
    print("Test removeall")
    kill_daemons()
    test_force_remove = 0
    for nspace in db.keys():
        for basename in db[nspace].keys():
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue

                    if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
                        cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
                        errors += test_failure(cmd, "Clones are present, use removeall to delete everything")
                        if not test_force_remove:

                            cmd = (CFSD_PREFIX + " '{json}' set-attr snapset /dev/null").format(osd=osd, json=JSON)
                            logging.debug(cmd)
                            ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                            if ret != 0:
                                logging.error("Test set-up to corrupt snapset failed for {json}".format(json=JSON))
                                errors += 1
                                # Do the removeall since this test failed to set-up
                            else:
                                test_force_remove = 1

                                cmd = (CFSD_PREFIX + " '{json}' --force remove").format(osd=osd, json=JSON)
                                logging.debug(cmd)
                                ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                                if ret != 0:
                                    logging.error("forced remove with corrupt snapset failed for {json}".format(json=JSON))
                                    errors += 1
                                continue

                    cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("remove with --force failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    tmpfd = open(TMPFILE, "w")
                    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=tmpfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
                        errors += 1
                    tmpfd.close()
                    lines = get_lines(TMPFILE)
                    if len(lines) != 0:
                        logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
                        errors += 1
    vstart(new=False)
    wait_for_health()
    cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    if ret != 0:
        logging.error("rados rmsnap failed")
        errors += 1
    time.sleep(2)
    wait_for_health()
    return errors


def main(argv):
    if sys.version_info[0] < 3:
        sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
    else:
        stdout = sys.stdout.buffer
    if len(argv) > 1 and argv[1] == "debug":
        nullfd = stdout
    else:
        nullfd = DEVNULL

    call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
    os.chdir(CEPH_DIR)
    os.environ["CEPH_DIR"] = CEPH_DIR
    OSDDIR = "dev"
    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    if len(argv) > 0 and argv[0] == 'large':
        PG_COUNT = 12
        NUM_REP_OBJECTS = 200
        NUM_CLONED_REP_OBJECTS = 50
        NUM_EC_OBJECTS = 12
        NUM_NSPACES = 4
        # Larger data sets for first object per namespace
        DATALINECOUNT = 50000
        # Number of objects to do xattr/omap testing on
        ATTR_OBJS = 10
    else:
        PG_COUNT = 4
        NUM_REP_OBJECTS = 2
        NUM_CLONED_REP_OBJECTS = 2
        NUM_EC_OBJECTS = 2
        NUM_NSPACES = 2
        # Larger data sets for first object per namespace
        DATALINECOUNT = 10
        # Number of objects to do xattr/omap testing on
        ATTR_OBJS = 2
    ERRORS = 0
    pid = os.getpid()
    TESTDIR = "/tmp/test.{pid}".format(pid=pid)
    DATADIR = "/tmp/data.{pid}".format(pid=pid)
    CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} "
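    # For reference, CFSD_PREFIX.format(osd="osd0") expands to something like
    # (path shown is illustrative):
    #   <CEPH_BIN>/ceph-objectstore-tool --no-mon-config --data-path dev/osd0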
    PROFNAME = "testecprofile"

    os.environ['CEPH_CONF'] = CEPH_CONF
    vstart(new=True)
    wait_for_health()

    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    time.sleep(2)
    REPID = get_pool_id(REP_POOL, nullfd)

    print("Created Replicated pool #{repid}".format(repid=REPID))

    cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    ECID = get_pool_id(EC_POOL, nullfd)

    print("Created Erasure coded pool #{ecid}".format(ecid=ECID))

    print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
    cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
    logging.debug(cmd)
    call(cmd, shell=True)

    db = {}

    objects = range(1, NUM_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        db[nspace] = {}

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

            db[nspace][NAME] = {}

            if i < ATTR_OBJS + 1:
                keys = range(i)
            else:
                keys = range(0)
            db[nspace][NAME]["xattr"] = {}
            for k in keys:
                if k == 0:
                    continue
                mykey = "key{i}-{k}".format(i=i, k=k)
                myval = "val{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.error("setxattr failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["xattr"][mykey] = myval

            # Create omap header in all objects but REPobject1
            if i < ATTR_OBJS + 1 and i != 1:
                myhdr = "hdr{i}".format(i=i)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.critical("setomapheader failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["omapheader"] = myhdr

            db[nspace][NAME]["omap"] = {}
            for k in keys:
                if k == 0:
                    continue
                mykey = "okey{i}-{k}".format(i=i, k=k)
                myval = "oval{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.critical("setomapval failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["omap"][mykey] = myval

    # Create some clones
    cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    call(cmd, shell=True)

    objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            # First clone
            CLONENAME = DDNAME + "__1"
            DDNAME += "__head"

            cmd = "mv -f " + DDNAME + " " + CLONENAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the replicated data after a snapshot for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put command failed with {ret}".format(ret=ret))
                return 1

    print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))

    objects = range(1, NUM_EC_OBJECTS + 1)
    nspaces = range(NUM_NSPACES)
    for n in nspaces:
        nspace = get_nspace(n)

        for i in objects:
            NAME = EC_NAME + "{num}".format(num=i)
            LNAME = nspace + "-" + NAME
            DDNAME = os.path.join(DATADIR, LNAME)
            DDNAME += "__head"

            cmd = "rm -f " + DDNAME
            logging.debug(cmd)
            call(cmd, shell=True)

            if i == 1:
                dataline = range(DATALINECOUNT)
            else:
                dataline = range(1)
            fd = open(DDNAME, "w")
            data = "This is the erasure coded data for " + LNAME + "\n"
            for _ in dataline:
                fd.write(data)
            fd.close()

            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=nullfd)
            if ret != 0:
                logging.critical("Rados put into the erasure coded pool failed with {ret}".format(ret=ret))
                return 1

            db[nspace][NAME] = {}

            db[nspace][NAME]["xattr"] = {}
            if i < ATTR_OBJS + 1:
                keys = range(i)
            else:
                keys = range(0)
            for k in keys:
                if k == 0:
                    continue
                mykey = "key{i}-{k}".format(i=i, k=k)
                myval = "val{i}-{k}".format(i=i, k=k)
                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                logging.debug(cmd)
                ret = call(cmd, shell=True)
                if ret != 0:
                    logging.error("setxattr failed with {ret}".format(ret=ret))
                    ERRORS += 1
                db[nspace][NAME]["xattr"][mykey] = myval

            # Omap isn't supported in EC pools
            db[nspace][NAME]["omap"] = {}

    logging.debug(db)

    kill_daemons()

    if ERRORS:
        logging.critical("Unable to set up test")
        return 1

    ALLREPPGS = get_pgs(OSDDIR, REPID)
    logging.debug(ALLREPPGS)
    ALLECPGS = get_pgs(OSDDIR, ECID)
    logging.debug(ALLECPGS)

    OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
    logging.debug(OBJREPPGS)
    OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
    logging.debug(OBJECPGS)

    ONEPG = ALLREPPGS[0]
    logging.debug(ONEPG)
    osds = get_osds(ONEPG, OSDDIR)
    ONEOSD = osds[0]
    logging.debug(ONEOSD)

    print("Test invalid parameters")
    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # On export can't use stdout to a terminal
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)

    # Prep a valid ec export file for import failure tests
    ONEECPG = ALLECPGS[0]
    osds = get_osds(ONEECPG, OSDDIR)
    ONEECOSD = osds[0]
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    os.unlink(OTHERFILE)

    # Prep a valid export file for import failure tests
    OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
    cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
    logging.debug(cmd)
    call(cmd, shell=True, stdout=nullfd, stderr=nullfd)

    # On import can't specify a different pgid than the file
    TMPPG = "{pool}.80".format(pool=REPID)
    cmd = (CFSD_PREFIX + "--op import --pgid 12.dd --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
    ERRORS += test_failure(cmd, "specified pgid 12.dd does not match actual pgid")

    os.unlink(OTHERFILE)
    cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
    ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))

    cmd = "{path}/ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")

    cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # On import can't use stdin from a terminal
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)

    # Specify a bad --type
    os.mkdir(OSDDIR + "/fakeosd")
    cmd = ("{path}/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Unable to create store of type foobar")

    # Don't specify a data-path
    cmd = "{path}/ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
    ERRORS += test_failure(cmd, "Must provide --data-path")

    cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")

    cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide pgid")

    # Don't specify a --op nor object command
    cmd = CFSD_PREFIX.format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op or object command...")

    # Specify a bad --op command
    cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log)")

    # Provide just the object param not a command
    cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "Invalid syntax, missing command")

    # Provide an object name that doesn't exist
    cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
    ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")

    # Provide an invalid object command
    cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")

    cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")

    cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")

    cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")

    cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")

    cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
    ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")

    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
    ALLPGS = OBJREPPGS + OBJECPGS
    OSDS = get_osds(ALLPGS[0], OSDDIR)
    osd = OSDS[0]

    print("Test all --op dump-journal")
    ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
    ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)

    # Test --op list and generate json for all objects
    print("Test --op list variants")

    # retrieve all objects from all PGs
    tmpfd = open(TMPFILE, "wb")
    cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]

    # retrieve all objects in a given PG
    tmpfd = open(OTHERFILE, "ab")
    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(OTHERFILE)
    JSONOBJ = sorted(set(lines))
    (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

    if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
        logging.error("the first line of --op list is different "
                      "from the first line of --op list --pgid {pg}".format(pg=pgid))
        ERRORS += 1

    # retrieve all objects with a given name in a given PG
    tmpfd = open(OTHERFILE, "wb")
    cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=tmpfd)
    if ret != 0:
        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
        ERRORS += 1
    tmpfd.close()
    lines = get_lines(OTHERFILE)
    JSONOBJ = sorted(set(lines))
    (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]

    if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
        logging.error("the first line of --op list is different "
                      "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
        ERRORS += 1

    print("Test --op list by generating json for all objects using default format")
    for pg in ALLPGS:
        OSDS = get_osds(pg, OSDDIR)
        for osd in OSDS:
            tmpfd = open(TMPFILE, "ab")
            cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stdout=tmpfd)
            if ret != 0:
                logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
                ERRORS += 1

    tmpfd.close()
    lines = get_lines(TMPFILE)
    JSONOBJ = sorted(set(lines))
    for JSON in JSONOBJ:
        (pgid, jsondict) = json.loads(JSON)
        # Skip clones for now
        if jsondict['snapid'] != -2:
            continue
        db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
        # print db[jsondict['namespace']][jsondict['oid']]['json']
        if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
            logging.error("Malformed JSON {json}".format(json=JSON))
            ERRORS += 1

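    # One line of "--op list" default-format output, as assumed by the
    # parsing above (values are illustrative, not captured output):
    #   ["1.0",{"oid":"REPobject1","key":"","snapid":-2,"hash":...,
    #           "max":0,"pool":1,"namespace":"ns1"}]
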
1160 # Test get-bytes
1161 print("Test get-bytes and set-bytes")
1162 for nspace in db.keys():
1163 for basename in db[nspace].keys():
1164 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1165 JSON = db[nspace][basename]['json']
1166 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1167 TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
1168 SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
1169 BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
1170 for pg in OBJREPPGS:
1171 OSDS = get_osds(pg, OSDDIR)
1172 for osd in OSDS:
1173 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1174 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1175 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1176 if not fnames:
1177 continue
1178 try:
1179 os.unlink(GETNAME)
1180 except:
1181 pass
1182 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
1183 logging.debug(cmd)
1184 ret = call(cmd, shell=True)
1185 if ret != 0:
1186 logging.error("Bad exit status {ret}".format(ret=ret))
1187 ERRORS += 1
1188 continue
1189 cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
1190 ret = call(cmd, shell=True)
1191 if ret != 0:
1192 logging.error("Data from get-bytes differ")
1193 logging.debug("Got:")
1194 cat_file(logging.DEBUG, GETNAME)
1195 logging.debug("Expected:")
1196 cat_file(logging.DEBUG, file)
1197 ERRORS += 1
1198 fd = open(SETNAME, "w")
1199 data = "put-bytes going into {file}\n".format(file=file)
1200 fd.write(data)
1201 fd.close()
1202 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
1203 logging.debug(cmd)
1204 ret = call(cmd, shell=True)
1205 if ret != 0:
1206 logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
1207 ERRORS += 1
1208 fd = open(TESTNAME, "wb")
1209 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1210 logging.debug(cmd)
1211 ret = call(cmd, shell=True, stdout=fd)
1212 fd.close()
1213 if ret != 0:
1214 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1215 ERRORS += 1
1216 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1217 logging.debug(cmd)
1218 ret = call(cmd, shell=True)
1219 if ret != 0:
1220 logging.error("Data after set-bytes differ")
1221 logging.debug("Got:")
1222 cat_file(logging.DEBUG, TESTNAME)
1223 logging.debug("Expected:")
1224 cat_file(logging.DEBUG, SETNAME)
1225 ERRORS += 1
1226
1227 # Use set-bytes with --dry-run and make sure contents haven't changed
1228 fd = open(BADNAME, "w")
1229 data = "Bad data for --dry-run in {file}\n".format(file=file)
1230 fd.write(data)
1231 fd.close()
1232 cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
1233 logging.debug(cmd)
1234 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1235 if ret != 0:
1236 logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
1237 ERRORS += 1
1238 fd = open(TESTNAME, "wb")
1239 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1240 logging.debug(cmd)
1241 ret = call(cmd, shell=True, stdout=fd)
1242 fd.close()
1243 if ret != 0:
1244 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1245 ERRORS += 1
1246 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1247 logging.debug(cmd)
1248 ret = call(cmd, shell=True)
1249 if ret != 0:
1250 logging.error("Data after set-bytes --dry-run changed!")
1251 logging.debug("Got:")
1252 cat_file(logging.DEBUG, TESTNAME)
1253 logging.debug("Expected:")
1254 cat_file(logging.DEBUG, SETNAME)
1255 ERRORS += 1
1256
1257 fd = open(file, "rb")
1258 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
1259 logging.debug(cmd)
1260 ret = call(cmd, shell=True, stdin=fd)
1261 if ret != 0:
1262 logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
1263 ERRORS += 1
1264 fd.close()
1265
1266 try:
1267 os.unlink(GETNAME)
1268 except:
1269 pass
1270 try:
1271 os.unlink(TESTNAME)
1272 except:
1273 pass
1274 try:
1275 os.unlink(SETNAME)
1276 except:
1277 pass
1278 try:
1279 os.unlink(BADNAME)
1280 except:
1281 pass
1282
1283 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1284 print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
1285 for nspace in db.keys():
1286 for basename in db[nspace].keys():
1287 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1288 JSON = db[nspace][basename]['json']
1289 for pg in OBJREPPGS:
1290 OSDS = get_osds(pg, OSDDIR)
1291 for osd in OSDS:
1292 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1293 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1294 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1295 if not fnames:
1296 continue
1297 for key, val in db[nspace][basename]["xattr"].items():
1298 attrkey = "_" + key
1299 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
1300 logging.debug(cmd)
1301 getval = check_output(cmd, shell=True)
1302 if getval != val:
1303 logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
1304 ERRORS += 1
1305 continue
1306 # set-attr to bogus value "foobar"
1307 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1308 logging.debug(cmd)
1309 ret = call(cmd, shell=True)
1310 if ret != 0:
1311 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1312 ERRORS += 1
1313 continue
1314 # Test set-attr with dry-run
1315 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1316 logging.debug(cmd)
1317 ret = call(cmd, shell=True, stdout=nullfd)
1318 if ret != 0:
1319 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1320 ERRORS += 1
1321 continue
1322 # Check the set-attr
1323 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1324 logging.debug(cmd)
1325 getval = check_output(cmd, shell=True)
1326 if ret != 0:
1327 logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
1328 ERRORS += 1
1329 continue
1330 if getval != "foobar":
1331 logging.error("Check of set-attr failed because we got {val}".format(val=getval))
1332 ERRORS += 1
1333 continue
1334 # Test rm-attr
1335 cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1336 logging.debug(cmd)
1337 ret = call(cmd, shell=True)
1338 if ret != 0:
1339 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1340 ERRORS += 1
1341 continue
1342 # Check rm-attr with dry-run
1343 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1344 logging.debug(cmd)
1345 ret = call(cmd, shell=True, stdout=nullfd)
1346 if ret != 0:
1347 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1348 ERRORS += 1
1349 continue
1350 cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1351 logging.debug(cmd)
1352 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1353 if ret == 0:
1354 logging.error("For rm-attr expect get-attr to fail, but it succeeded")
1355 ERRORS += 1
1356 # Put back value
1357 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
1358 logging.debug(cmd)
1359 ret = call(cmd, shell=True)
1360 if ret != 0:
1361 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1362 ERRORS += 1
1363 continue
1364
1365 hdr = db[nspace][basename].get("omapheader", "")
1366 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1367 logging.debug(cmd)
1368 gethdr = check_output(cmd, shell=True)
1369 if gethdr != hdr:
1370 logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1371 ERRORS += 1
1372 continue
1373 # set-omaphdr to bogus value "foobar"
1374 cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1375 logging.debug(cmd)
1376 ret = call(cmd, shell=True)
1377 if ret != 0:
1378 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1379 ERRORS += 1
1380 continue
1381 # Check the set-omaphdr
1382 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1383 logging.debug(cmd)
1384 gethdr = check_output(cmd, shell=True)
1385 if ret != 0:
1386 logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1387 ERRORS += 1
1388 continue
1389 if gethdr != "foobar":
1390 logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1391 ERRORS += 1
1392 continue
1393 # Test dry-run with set-omaphdr
1394 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1395 logging.debug(cmd)
1396 ret = call(cmd, shell=True, stdout=nullfd)
1397 if ret != 0:
1398 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1399 ERRORS += 1
1400 continue
1401 # Put back value
1402 cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1403 logging.debug(cmd)
1404 ret = call(cmd, shell=True)
1405 if ret != 0:
1406 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1407 ERRORS += 1
1408 continue
1409
1410 for omapkey, val in db[nspace][basename]["omap"].items():
1411 cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
1412 logging.debug(cmd)
1413 getval = check_output(cmd, shell=True)
1414 if getval != val:
1415 logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
1416 ERRORS += 1
1417 continue
1418 # set-omap to bogus value "foobar"
1419 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1420 logging.debug(cmd)
1421 ret = call(cmd, shell=True)
1422 if ret != 0:
1423 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1424 ERRORS += 1
1425 continue
1426 # Check set-omap with dry-run
1427 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1428 logging.debug(cmd)
1429 ret = call(cmd, shell=True, stdout=nullfd)
1430 if ret != 0:
1431 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1432 ERRORS += 1
1433 continue
1434 # Check the set-omap
1435 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1436 logging.debug(cmd)
1437 getval = check_output(cmd, shell=True)
1438 if ret != 0:
1439 logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
1440 ERRORS += 1
1441 continue
1442 if getval != "foobar":
1443 logging.error("Check of set-omap failed because we got {val}".format(val=getval))
1444 ERRORS += 1
1445 continue
1446 # Test rm-omap
1447 cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1448 logging.debug(cmd)
1449 ret = call(cmd, shell=True)
1450 if ret != 0:
1451 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1452 ERRORS += 1
1453 # Check rm-omap with dry-run
1454 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1455 logging.debug(cmd)
1456 ret = call(cmd, shell=True, stdout=nullfd)
1457 if ret != 0:
1458 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1459 ERRORS += 1
1460 cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1461 logging.debug(cmd)
1462 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1463 if ret == 0:
1464 logging.error("For rm-omap expect get-omap to fail, but it succeeded")
1465 ERRORS += 1
1466 # Put back value
1467 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
1468 logging.debug(cmd)
1469 ret = call(cmd, shell=True)
1470 if ret != 0:
1471 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1472 ERRORS += 1
1473 continue
1474
1475 # Test dump
1476 print("Test dump")
1477 for nspace in db.keys():
1478 for basename in db[nspace].keys():
1479 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1480 JSON = db[nspace][basename]['json']
1481 jsondict = json.loads(JSON)
1482 for pg in OBJREPPGS:
1483 OSDS = get_osds(pg, OSDDIR)
1484 for osd in OSDS:
1485 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1486 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1487 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1488 if not fnames:
1489 continue
1490 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1491 continue
1492 logging.debug("REPobject " + JSON)
1493 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1494 logging.debug(cmd)
1495 ret = call(cmd, shell=True)
1496 if ret != 0:
1497 logging.error("Invalid dump for {json}".format(json=JSON))
1498 ERRORS += 1
1499 if 'shard_id' in jsondict[1]:
1500 logging.debug("ECobject " + JSON)
1501 for pg in OBJECPGS:
1502 OSDS = get_osds(pg, OSDDIR)
1503 jsondict = json.loads(JSON)
1504 for osd in OSDS:
DIR = os.path.join(OSDDIR, osd, "current", "{pg}_head".format(pg=pg))
1506 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1507 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1508 if not fnames:
1509 continue
1510 if int(basename.split(EC_NAME)[1]) > int(NUM_EC_OBJECTS):
1511 continue
1512 # Fix shard_id since we only have one json instance for each object
1513 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1514 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"hinfo\": [{{]' > /dev/null").format(osd=osd, json=json.dumps((pg, jsondict[1])))
1515 logging.debug(cmd)
1516 ret = call(cmd, shell=True)
1517 if ret != 0:
logging.error("Invalid dump for {json}".format(json=JSON))
ERRORS += 1
1519
1520 print("Test list-attrs get-attr")
ATTRFILE = "/tmp/attrs.{pid}".format(pid=pid)
VALFILE = "/tmp/val.{pid}".format(pid=pid)
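# list-attrs should report every xattr stored with the object; user
# xattrs carry a leading "_" prefix, which is stripped before
# comparing against the values recorded in db.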
1523 for nspace in db.keys():
1524 for basename in db[nspace].keys():
1525 file = os.path.join(DATADIR, nspace + "-" + basename)
1526 JSON = db[nspace][basename]['json']
1527 jsondict = json.loads(JSON)
1528
1529 if 'shard_id' in jsondict[1]:
1530 logging.debug("ECobject " + JSON)
1531 found = 0
1532 for pg in OBJECPGS:
1533 OSDS = get_osds(pg, OSDDIR)
1534 # Fix shard_id since we only have one json instance for each object
1535 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1536 JSON = json.dumps((pg, jsondict[1]))
1537 for osd in OSDS:
1538 cmd = (CFSD_PREFIX + " --tty '{json}' get-attr hinfo_key").format(osd=osd, json=JSON)
1539 logging.debug("TRY: " + cmd)
1540 try:
1541 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1542 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1543 found += 1
1544 except subprocess.CalledProcessError as e:
1545 logging.debug("Error message: {output}".format(output=e.output))
1546 if "No such file or directory" not in str(e.output) and \
1547 "No data available" not in str(e.output) and \
1548 "not contained by pg" not in str(e.output):
1549 raise
1550 # Assuming k=2 m=1 for the default ec pool
1551 if found != 3:
1552 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1553 ERRORS += 1
1554
1555 for pg in ALLPGS:
1556 # Make sure rep obj with rep pg or ec obj with ec pg
1557 if ('shard_id' in jsondict[1]) != (pg.find('s') > 0):
1558 continue
1559 if 'shard_id' in jsondict[1]:
1560 # Fix shard_id since we only have one json instance for each object
1561 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1562 JSON = json.dumps((pg, jsondict[1]))
1563 OSDS = get_osds(pg, OSDDIR)
1564 for osd in OSDS:
DIR = os.path.join(OSDDIR, osd, "current", "{pg}_head".format(pg=pg))
1566 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1567 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1568 if not fnames:
1569 continue
1570 afd = open(ATTRFILE, "wb")
1571 cmd = (CFSD_PREFIX + " '{json}' list-attrs").format(osd=osd, json=JSON)
1572 logging.debug(cmd)
1573 ret = call(cmd, shell=True, stdout=afd)
1574 afd.close()
1575 if ret != 0:
1576 logging.error("list-attrs failed with {ret}".format(ret=ret))
1577 ERRORS += 1
1578 continue
1579 keys = get_lines(ATTRFILE)
1580 values = dict(db[nspace][basename]["xattr"])
1581 for key in keys:
1582 if key == "_" or key == "snapset" or key == "hinfo_key":
1583 continue
1584 key = key.strip("_")
1585 if key not in values:
1586 logging.error("Unexpected key {key} present".format(key=key))
1587 ERRORS += 1
1588 continue
1589 exp = values.pop(key)
1590 vfd = open(VALFILE, "wb")
1591 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key="_" + key)
1592 logging.debug(cmd)
1593 ret = call(cmd, shell=True, stdout=vfd)
1594 vfd.close()
1595 if ret != 0:
1596 logging.error("get-attr failed with {ret}".format(ret=ret))
1597 ERRORS += 1
1598 continue
1599 lines = get_lines(VALFILE)
1600 val = lines[0]
1601 if exp != val:
1602 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1603 ERRORS += 1
if len(values) != 0:
    logging.error("Not all keys found, remaining keys:")
    logging.error(values)
    ERRORS += 1
1607
1608 print("Test --op meta-list")
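# meta-list enumerates objects in the meta collection; every entry
# should report pgid "meta" and an empty namespace.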
1609 tmpfd = open(TMPFILE, "wb")
1610 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1611 logging.debug(cmd)
1612 ret = call(cmd, shell=True, stdout=tmpfd)
1613 if ret != 0:
1614 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1615 ERRORS += 1
1616
1617 print("Test get-bytes on meta")
1618 tmpfd.close()
1619 lines = get_lines(TMPFILE)
1620 JSONOBJ = sorted(set(lines))
1621 for JSON in JSONOBJ:
1622 (pgid, jsondict) = json.loads(JSON)
1623 if pgid != "meta":
1624 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1625 ERRORS += 1
1626 if jsondict['namespace'] != "":
1627 logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
1628 ERRORS += 1
1629 logging.info(JSON)
1630 try:
1631 os.unlink(GETNAME)
except OSError:
    pass
1634 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1635 logging.debug(cmd)
1636 ret = call(cmd, shell=True)
1637 if ret != 0:
1638 logging.error("Bad exit status {ret}".format(ret=ret))
1639 ERRORS += 1
1640
1641 try:
1642 os.unlink(GETNAME)
except OSError:
    pass
1645 try:
1646 os.unlink(TESTNAME)
except OSError:
    pass
1649
1650 print("Test pg info")
1651 for pg in ALLREPPGS + ALLECPGS:
1652 for osd in get_osds(pg, OSDDIR):
1653 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1654 logging.debug(cmd)
1655 ret = call(cmd, shell=True, stdout=nullfd)
1656 if ret != 0:
1657 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1658 ERRORS += 1
1659
1660 print("Test pg logging")
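# A PG that holds test objects must show "modify" entries in its
# log, and a PG with no objects must not.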
1661 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
1662 logging.warning("All PGs have objects, so no log without modify entries")
1663 for pg in ALLREPPGS + ALLECPGS:
1664 for osd in get_osds(pg, OSDDIR):
1665 tmpfd = open(TMPFILE, "wb")
1666 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1667 logging.debug(cmd)
1668 ret = call(cmd, shell=True, stdout=tmpfd)
1669 if ret != 0:
1670 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1671 ERRORS += 1
1672 HASOBJ = pg in OBJREPPGS + OBJECPGS
1673 MODOBJ = False
1674 for line in get_lines(TMPFILE):
1675 if line.find("modify") != -1:
1676 MODOBJ = True
1677 break
1678 if HASOBJ != MODOBJ:
1679 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
MSG = "" if HASOBJ else "NOT "
1681 print("Log should {msg}have a modify entry".format(msg=MSG))
1682 ERRORS += 1
1683
1684 try:
1685 os.unlink(TMPFILE)
except OSError:
    pass
1688
1689 print("Test list-pgs")
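# Compare the tool's list-pgs output against the PGs found by
# scanning each osd's current/ directory.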
1690 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1691
1692 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1693 CHECK_PGS = sorted(CHECK_PGS)
1694
1695 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1696 logging.debug(cmd)
1697 TEST_PGS = check_output(cmd, shell=True).split("\n")
1698 TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line
1699
1700 if TEST_PGS != CHECK_PGS:
1701 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1702 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1703 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1704 ERRORS += 1
1705
1706 EXP_ERRORS = 0
1707 print("Test pg export --dry-run")
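# A --dry-run export must succeed without creating the target file
# and without writing export data to stdout.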
1708 pg = ALLREPPGS[0]
1709 osd = get_osds(pg, OSDDIR)[0]
1710 fname = "/tmp/fname.{pid}".format(pid=pid)
1711 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1712 logging.debug(cmd)
1713 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1714 if ret != 0:
1715 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1716 EXP_ERRORS += 1
1717 elif os.path.exists(fname):
1718 logging.error("Exporting --dry-run created file")
1719 EXP_ERRORS += 1
1720
1721 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1722 logging.debug(cmd)
1723 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1724 if ret != 0:
1725 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1726 EXP_ERRORS += 1
1727 else:
1728 outdata = get_lines(fname)
1729 if len(outdata) > 0:
1730 logging.error("Exporting --dry-run to stdout not empty")
logging.error("Data: " + "\n".join(outdata))
1732 EXP_ERRORS += 1
1733
1734 os.mkdir(TESTDIR)
1735 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1736 os.mkdir(os.path.join(TESTDIR, osd))
1737 print("Test pg export")
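# Exercise all three output modes: shell redirection of stdout,
# "--file -" (explicit stdout), and "--file <path>".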
1738 for pg in ALLREPPGS + ALLECPGS:
1739 for osd in get_osds(pg, OSDDIR):
1740 mydir = os.path.join(TESTDIR, osd)
1741 fname = os.path.join(mydir, pg)
1742 if pg == ALLREPPGS[0]:
1743 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1744 elif pg == ALLREPPGS[1]:
1745 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1746 else:
1747 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1748 logging.debug(cmd)
1749 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1750 if ret != 0:
1751 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1752 EXP_ERRORS += 1
1753
1754 ERRORS += EXP_ERRORS
1755
1756 print("Test clear-data-digest")
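# Clear the digest on a single object and confirm via dump that it
# reads back as 0xffffffff (unset); the unconditional breaks below
# exit both loops after the first object.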
1757 for nspace in db.keys():
1758 for basename in db[nspace].keys():
1759 JSON = db[nspace][basename]['json']
1760 cmd = (CFSD_PREFIX + "'{json}' clear-data-digest").format(osd='osd0', json=JSON)
1761 logging.debug(cmd)
1762 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1763 if ret != 0:
1764 logging.error("Clearing data digest failed for {json}".format(json=JSON))
1765 ERRORS += 1
1766 break
1767 cmd = (CFSD_PREFIX + "'{json}' dump | grep '\"data_digest\": \"0xff'").format(osd='osd0', json=JSON)
1768 logging.debug(cmd)
1769 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1770 if ret != 0:
1771 logging.error("Data digest not cleared for {json}".format(json=JSON))
1772 ERRORS += 1
1773 break
1774 break
1775 break
1776
1777 print("Test pg removal")
1778 RM_ERRORS = 0
1779 for pg in ALLREPPGS + ALLECPGS:
1780 for osd in get_osds(pg, OSDDIR):
1781 # This should do nothing
1782 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1783 logging.debug(cmd)
1784 ret = call(cmd, shell=True, stdout=nullfd)
1785 if ret != 0:
1786 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1787 RM_ERRORS += 1
1788 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1789 logging.debug(cmd)
1790 ret = call(cmd, shell=True, stdout=nullfd)
1791 if ret != 0:
1792 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1793 RM_ERRORS += 1
1794
1795 ERRORS += RM_ERRORS
1796
1797 IMP_ERRORS = 0
1798 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1799 print("Test pg import")
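# For each exported file: dump-export must parse it, a --dry-run
# import must be a no-op, and the real import is fed via a pipe,
# "--file -", or "--file <path>" depending on the PG.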
1800 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1801 dir = os.path.join(TESTDIR, osd)
1802 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1803 for pg in PGS:
1804 file = os.path.join(dir, pg)
1805 # Make sure this doesn't crash
1806 cmd = (CFSD_PREFIX + "--op dump-export --file {file}").format(osd=osd, file=file)
1807 logging.debug(cmd)
1808 ret = call(cmd, shell=True, stdout=nullfd)
1809 if ret != 0:
1810 logging.error("Dump-export failed from {file} with {ret}".format(file=file, ret=ret))
1811 IMP_ERRORS += 1
1812 # This should do nothing
1813 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1814 logging.debug(cmd)
1815 ret = call(cmd, shell=True, stdout=nullfd)
1816 if ret != 0:
1817 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1818 IMP_ERRORS += 1
1819 if pg == PGS[0]:
1820 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1821 elif pg == PGS[1]:
1822 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1823 else:
1824 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1825 logging.debug(cmd)
1826 ret = call(cmd, shell=True, stdout=nullfd)
1827 if ret != 0:
1828 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1829 IMP_ERRORS += 1
1830 else:
1831 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1832
1833 ERRORS += IMP_ERRORS
1835
1836 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1837 print("Verify replicated import data")
1838 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1839 ERRORS += data_errors
1840 else:
1841 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1842
1843 print("Test all --op dump-journal again")
1844 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1845 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1846
1847 vstart(new=False)
1848 wait_for_health()
1849
1850 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1851 print("Verify erasure coded import data")
1852 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1853 # Check replicated data/xattr/omap using rados
1854 print("Verify replicated import data using rados")
1855 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1856
1857 if EXP_ERRORS == 0:
1858 NEWPOOL = "rados-import-pool"
1859 cmd = "{path}/ceph osd pool create {pool} 8".format(pool=NEWPOOL, path=CEPH_BIN)
1860 logging.debug(cmd)
1861 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1862
1863 print("Test rados import")
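# Only exports from the replicated pool (REPID) are usable with
# rados import; a dry-run must leave the new pool empty, and
# --no-overwrite must still succeed when the objects already exist.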
1864 first = True
1865 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1866 dir = os.path.join(TESTDIR, osd)
1867 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1868 if pg.find("{id}.".format(id=REPID)) != 0:
1869 continue
1870 file = os.path.join(dir, pg)
1871 if first:
1872 first = False
1873 # This should do nothing
1874 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1875 logging.debug(cmd)
1876 ret = call(cmd, shell=True, stdout=nullfd)
1877 if ret != 0:
1878 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1879 ERRORS += 1
1880 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1881 logging.debug(cmd)
1882 data = check_output(cmd, shell=True)
1883 if data:
1884 logging.error("'{data}'".format(data=data))
1885 logging.error("Found objects after dry-run")
1886 ERRORS += 1
1887 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1888 logging.debug(cmd)
1889 ret = call(cmd, shell=True, stdout=nullfd)
1890 if ret != 0:
1891 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1892 ERRORS += 1
1893 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1894 logging.debug(cmd)
1895 ret = call(cmd, shell=True, stdout=nullfd)
1896 if ret != 0:
1897 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1898 ERRORS += 1
1899
1900 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1901 else:
1902 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1903
1904 # Clear directories of previous portion
1905 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1906 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1907 os.mkdir(TESTDIR)
1908 os.mkdir(DATADIR)
1909
1910 # Cause SPLIT_POOL to split and test import with object/log filtering
1911 print("Testing import all objects after a split")
1912 SPLIT_POOL = "split_pool"
1913 PG_COUNT = 1
1914 SPLIT_OBJ_COUNT = 5
1915 SPLIT_NSPACE_COUNT = 2
1916 SPLIT_NAME = "split"
1917 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1918 logging.debug(cmd)
1919 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1920 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1921 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1922 EXP_ERRORS = 0
1923 RM_ERRORS = 0
1924 IMP_ERRORS = 0
1925
1926 objects = range(1, SPLIT_OBJ_COUNT + 1)
1927 nspaces = range(SPLIT_NSPACE_COUNT)
1928 for n in nspaces:
1929 nspace = get_nspace(n)
1930
1931 for i in objects:
1932 NAME = SPLIT_NAME + "{num}".format(num=i)
1933 LNAME = nspace + "-" + NAME
1934 DDNAME = os.path.join(DATADIR, LNAME)
1935 DDNAME += "__head"
1936
1937 cmd = "rm -f " + DDNAME
1938 logging.debug(cmd)
1939 call(cmd, shell=True)
1940
1941 if i == 1:
1942 dataline = range(DATALINECOUNT)
1943 else:
1944 dataline = range(1)
1945 fd = open(DDNAME, "w")
1946 data = "This is the split data for " + LNAME + "\n"
1947 for _ in dataline:
1948 fd.write(data)
1949 fd.close()
1950
1951 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1952 logging.debug(cmd)
1953 ret = call(cmd, shell=True, stderr=nullfd)
1954 if ret != 0:
1955 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1956 return 1
1957
1958 wait_for_health()
1959 kill_daemons()
1960
1961 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1962 os.mkdir(os.path.join(TESTDIR, osd))
1963
1964 pg = "{pool}.0".format(pool=SPLITID)
1965 EXPORT_PG = pg
1966
1967 export_osds = get_osds(pg, OSDDIR)
1968 for osd in export_osds:
1969 mydir = os.path.join(TESTDIR, osd)
1970 fname = os.path.join(mydir, pg)
1971 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1972 logging.debug(cmd)
1973 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1974 if ret != 0:
1975 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1976 EXP_ERRORS += 1
1977
1978 ERRORS += EXP_ERRORS
1979
1980 if EXP_ERRORS == 0:
1981 vstart(new=False)
1982 wait_for_health()
1983
1984 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1985 logging.debug(cmd)
1986 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1987 time.sleep(5)
1988 wait_for_health()
1989
1990 kill_daemons()
1991
1992 # Now 2 PGs, poolid.0 and poolid.1
1993 # make note of pgs before we remove the pgs...
osds = get_osds("{pool}.0".format(pool=SPLITID), OSDDIR)
1995 for seed in range(2):
1996 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1997
1998 for osd in osds:
1999 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
2000 logging.debug(cmd)
2001 ret = call(cmd, shell=True, stdout=nullfd)
2002
2003 which = 0
2004 for osd in osds:
2005 # This is weird. The export files are based on only the EXPORT_PG
2006 # and where that pg was before the split. Use 'which' to use all
2007 # export copies in import.
2008 mydir = os.path.join(TESTDIR, export_osds[which])
2009 fname = os.path.join(mydir, EXPORT_PG)
2010 which += 1
2011 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=EXPORT_PG, file=fname)
2012 logging.debug(cmd)
2013 ret = call(cmd, shell=True, stdout=nullfd)
2014 if ret != 0:
logging.error("Import failed from {file} with {ret}".format(file=fname, ret=ret))
2016 IMP_ERRORS += 1
2017
2018 ERRORS += IMP_ERRORS
2019
2020 # Start up again to make sure imports didn't corrupt anything
2021 if IMP_ERRORS == 0:
2022 print("Verify split import data")
2023 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
2024 ERRORS += data_errors
2025 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
2026 logging.error("Incorrect number of replicas seen {count}".format(count=count))
2027 ERRORS += 1
2028 vstart(new=False)
2029 wait_for_health()
2030
2031 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
2032 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
2033
2034 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
2035
2036 # vstart() starts 4 OSDs
2037 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
2038 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
2039
2040 kill_daemons()
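# Any core dumps left in CEPH_DIR mean a daemon crashed during the
# run; stash them in /tmp and count each one as a failure.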
2041 CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
2042 if CORES:
2043 CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
2044 os.mkdir(CORE_DIR)
2045 call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
2046 logging.error("Failure due to cores found")
2047 logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
2048 ERRORS += len(CORES)
2049
2050 if ERRORS == 0:
2051 print("TEST PASSED")
2052 return 0
2053 else:
2054 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
2055 return 1
2056
2057
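# delete any btrfs subvolumes under path (vstart-created OSD dirs may
# contain them), since a plain "rm -rf" cannot remove btrfs subvolumes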
2058 def remove_btrfs_subvolumes(path):
2059 if platform.system() == "FreeBSD":
2060 return
2061 result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE)
2062 for line in result.stdout:
2063 filesystem = decode(line).rstrip('\n')
2064 if filesystem == "btrfs":
2065 result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE)
2066 for line in result.stdout:
2067 subvolume = decode(line).split()[8]
2068 # extracting the relative volume name
2069 m = re.search(".*(%s.*)" % path, subvolume)
2070 if m:
2071 found = m.group(1)
2072 call("sudo btrfs subvolume delete %s" % found, shell=True)
2073
2074
2075 if __name__ == "__main__":
2076 status = 1
2077 try:
2078 status = main(sys.argv[1:])
2079 finally:
2080 kill_daemons()
2081 os.chdir(CEPH_BUILD_DIR)
2082 remove_btrfs_subvolumes(CEPH_DIR)
2083 call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
2084 sys.exit(status)