]> git.proxmox.com Git - ceph.git/blob - ceph/qa/standalone/special/ceph_objectstore_tool.py
import ceph 14.2.5
[ceph.git] / ceph / qa / standalone / special / ceph_objectstore_tool.py
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4 from subprocess import call
5 try:
6 from subprocess import check_output
7 except ImportError:
8 def check_output(*popenargs, **kwargs):
9 import subprocess
10 # backported from python 2.7 stdlib
11 process = subprocess.Popen(
12 stdout=subprocess.PIPE, *popenargs, **kwargs)
13 output, unused_err = process.communicate()
14 retcode = process.poll()
15 if retcode:
16 cmd = kwargs.get("args")
17 if cmd is None:
18 cmd = popenargs[0]
19 error = subprocess.CalledProcessError(retcode, cmd)
20 error.output = output
21 raise error
22 return output
23
24 import filecmp
25 import os
26 import subprocess
27 import math
28 import time
29 import sys
30 import re
31 import logging
32 import json
33 import tempfile
34 import platform
35
36 try:
37 from subprocess import DEVNULL
38 except ImportError:
39 DEVNULL = open(os.devnull, "wb")
40
41 logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
42
43
44 if sys.version_info[0] >= 3:
45 def decode(s):
46 return s.decode('utf-8')
47
48 def check_output(*args, **kwargs):
49 return decode(subprocess.check_output(*args, **kwargs))
50 else:
51 def decode(s):
52 return s
53
54
55
56 def wait_for_health():
57 print("Wait for health_ok...", end="")
58 tries = 0
59 while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
60 tries += 1
61 if tries == 150:
62 raise Exception("Time exceeded to go to health")
63 time.sleep(1)
64 print("DONE")
65
66
67 def get_pool_id(name, nullfd):
68 cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
69 # pool {pool} id # .... grab the 4 field
70 return check_output(cmd, stderr=nullfd).split()[3]
71
72
73 # return a list of unique PGS given an osd subdirectory
74 def get_osd_pgs(SUBDIR, ID):
75 PGS = []
76 if ID:
77 endhead = re.compile("{id}.*_head$".format(id=ID))
78 DIR = os.path.join(SUBDIR, "current")
79 PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))]
80 PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p]
81 return PGS
82
83
84 # return a sorted list of unique PGs given a directory
85 def get_pgs(DIR, ID):
86 OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
87 PGS = []
88 for d in OSDS:
89 SUBDIR = os.path.join(DIR, d)
90 PGS += get_osd_pgs(SUBDIR, ID)
91 return sorted(set(PGS))
92
93
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
95 def get_objs(ALLPGS, prefix, DIR, ID):
96 OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
97 PGS = []
98 for d in OSDS:
99 DIRL2 = os.path.join(DIR, d)
100 SUBDIR = os.path.join(DIRL2, "current")
101 for p in ALLPGS:
102 PGDIR = p + "_head"
103 if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
104 continue
105 FINALDIR = os.path.join(SUBDIR, PGDIR)
106 # See if there are any objects there
107 if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)):
108 PGS += [p]
109 return sorted(set(PGS))
110
111
112 # return a sorted list of OSDS which have data from a given PG
113 def get_osds(PG, DIR):
114 ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0]
115 OSDS = []
116 for d in ALLOSDS:
117 DIRL2 = os.path.join(DIR, d)
118 SUBDIR = os.path.join(DIRL2, "current")
119 PGDIR = PG + "_head"
120 if not os.path.isdir(os.path.join(SUBDIR, PGDIR)):
121 continue
122 OSDS += [d]
123 return sorted(OSDS)
124
125
126 def get_lines(filename):
127 tmpfd = open(filename, "r")
128 line = True
129 lines = []
130 while line:
131 line = tmpfd.readline().rstrip('\n')
132 if line:
133 lines += [line]
134 tmpfd.close()
135 os.unlink(filename)
136 return lines
137
138
139 def cat_file(level, filename):
140 if level < logging.getLogger().getEffectiveLevel():
141 return
142 print("File: " + filename)
143 with open(filename, "r") as f:
144 while True:
145 line = f.readline().rstrip('\n')
146 if not line:
147 break
148 print(line)
149 print("<EOF>")
150
151
152 def vstart(new, opt=""):
153 print("vstarting....", end="")
154 NEW = new and "-n" or "-N"
155 call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH={path}/src/pybind/mgr {path}/src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
156 print("DONE")
157
158
159 def test_failure(cmd, errmsg, tty=False):
160 if tty:
161 try:
162 ttyfd = open("/dev/tty", "rwb")
163 except Exception as e:
164 logging.info(str(e))
165 logging.info("SKIP " + cmd)
166 return 0
167 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
168 tmpfd = open(TMPFILE, "wb")
169
170 logging.debug(cmd)
171 if tty:
172 ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
173 ttyfd.close()
174 else:
175 ret = call(cmd, shell=True, stderr=tmpfd)
176 tmpfd.close()
177 if ret == 0:
178 logging.error(cmd)
179 logging.error("Should have failed, but got exit 0")
180 return 1
181 lines = get_lines(TMPFILE)
182 matched = [ l for l in lines if errmsg in l ]
183 if any(matched):
184 logging.info("Correctly failed with message \"" + matched[0] + "\"")
185 return 0
186 else:
187 logging.error("Command: " + cmd )
188 logging.error("Bad messages to stderr \"" + str(lines) + "\"")
189 logging.error("Expected \"" + errmsg + "\"")
190 return 1
191
192
193 def get_nspace(num):
194 if num == 0:
195 return ""
196 return "ns{num}".format(num=num)
197
198
199 def verify(DATADIR, POOL, NAME_PREFIX, db):
200 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
201 ERRORS = 0
202 for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
203 nsfile = rawnsfile.split("__")[0]
204 clone = rawnsfile.split("__")[1]
205 nspace = nsfile.split("-")[0]
206 file = nsfile.split("-")[1]
207 # Skip clones
208 if clone != "head":
209 continue
210 path = os.path.join(DATADIR, rawnsfile)
211 try:
212 os.unlink(TMPFILE)
213 except:
214 pass
215 cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
216 logging.debug(cmd)
217 call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
218 cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
219 logging.debug(cmd)
220 ret = call(cmd, shell=True)
221 if ret != 0:
222 logging.error("{file} data not imported properly".format(file=file))
223 ERRORS += 1
224 try:
225 os.unlink(TMPFILE)
226 except:
227 pass
228 for key, val in db[nspace][file]["xattr"].items():
229 cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
230 logging.debug(cmd)
231 getval = check_output(cmd, shell=True, stderr=DEVNULL)
232 logging.debug("getxattr {key} {val}".format(key=key, val=getval))
233 if getval != val:
234 logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
235 ERRORS += 1
236 continue
237 hdr = db[nspace][file].get("omapheader", "")
238 cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
239 logging.debug(cmd)
240 ret = call(cmd, shell=True, stderr=DEVNULL)
241 if ret != 0:
242 logging.error("rados getomapheader returned {ret}".format(ret=ret))
243 ERRORS += 1
244 else:
245 getlines = get_lines(TMPFILE)
246 assert(len(getlines) == 0 or len(getlines) == 1)
247 if len(getlines) == 0:
248 gethdr = ""
249 else:
250 gethdr = getlines[0]
251 logging.debug("header: {hdr}".format(hdr=gethdr))
252 if gethdr != hdr:
253 logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
254 ERRORS += 1
255 for key, val in db[nspace][file]["omap"].items():
256 cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
257 logging.debug(cmd)
258 ret = call(cmd, shell=True, stderr=DEVNULL)
259 if ret != 0:
260 logging.error("getomapval returned {ret}".format(ret=ret))
261 ERRORS += 1
262 continue
263 getlines = get_lines(TMPFILE)
264 if len(getlines) != 1:
265 logging.error("Bad data from getomapval {lines}".format(lines=getlines))
266 ERRORS += 1
267 continue
268 getval = getlines[0]
269 logging.debug("getomapval {key} {val}".format(key=key, val=getval))
270 if getval != val:
271 logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
272 ERRORS += 1
273 try:
274 os.unlink(TMPFILE)
275 except:
276 pass
277 return ERRORS
278
279
280 def check_journal(jsondict):
281 errors = 0
282 if 'header' not in jsondict:
283 logging.error("Key 'header' not in dump-journal")
284 errors += 1
285 elif 'max_size' not in jsondict['header']:
286 logging.error("Key 'max_size' not in dump-journal header")
287 errors += 1
288 else:
289 print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))
290 if 'entries' not in jsondict:
291 logging.error("Key 'entries' not in dump-journal output")
292 errors += 1
293 elif len(jsondict['entries']) == 0:
294 logging.info("No entries in journal found")
295 else:
296 errors += check_journal_entries(jsondict['entries'])
297 return errors
298
299
300 def check_journal_entries(entries):
301 errors = 0
302 for enum in range(len(entries)):
303 if 'offset' not in entries[enum]:
304 logging.error("No 'offset' key in entry {e}".format(e=enum))
305 errors += 1
306 if 'seq' not in entries[enum]:
307 logging.error("No 'seq' key in entry {e}".format(e=enum))
308 errors += 1
309 if 'transactions' not in entries[enum]:
310 logging.error("No 'transactions' key in entry {e}".format(e=enum))
311 errors += 1
312 elif len(entries[enum]['transactions']) == 0:
313 logging.error("No transactions found in entry {e}".format(e=enum))
314 errors += 1
315 else:
316 errors += check_entry_transactions(entries[enum], enum)
317 return errors
318
319
320 def check_entry_transactions(entry, enum):
321 errors = 0
322 for tnum in range(len(entry['transactions'])):
323 if 'trans_num' not in entry['transactions'][tnum]:
324 logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
325 errors += 1
326 elif entry['transactions'][tnum]['trans_num'] != tnum:
327 ft = entry['transactions'][tnum]['trans_num']
328 logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
329 errors += 1
330 if 'ops' not in entry['transactions'][tnum]:
331 logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
332 errors += 1
333 else:
334 errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum)
335 return errors
336
337
338 def check_transaction_ops(ops, enum, tnum):
339 if len(ops) is 0:
340 logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
341 errors = 0
342 for onum in range(len(ops)):
343 if 'op_num' not in ops[onum]:
344 logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
345 errors += 1
346 elif ops[onum]['op_num'] != onum:
347 fo = ops[onum]['op_num']
348 logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
349 errors += 1
350 if 'op_name' not in ops[onum]:
351 logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
352 errors += 1
353 return errors
354
355
356 def test_dump_journal(CFSD_PREFIX, osds):
357 ERRORS = 0
358 pid = os.getpid()
359 TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
360
361 for osd in osds:
362 # Test --op dump-journal by loading json
363 cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
364 logging.debug(cmd)
365 tmpfd = open(TMPFILE, "wb")
366 ret = call(cmd, shell=True, stdout=tmpfd)
367 if ret != 0:
368 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
369 ERRORS += 1
370 continue
371 tmpfd.close()
372 tmpfd = open(TMPFILE, "r")
373 jsondict = json.load(tmpfd)
374 tmpfd.close()
375 os.unlink(TMPFILE)
376
377 journal_errors = check_journal(jsondict)
378 if journal_errors is not 0:
379 logging.error(jsondict)
380 ERRORS += journal_errors
381
382 return ERRORS
383
384 CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
385 CEPH_BIN = os.environ.get('CEPH_BIN')
386 CEPH_ROOT = os.environ.get('CEPH_ROOT')
387
388 if not CEPH_BUILD_DIR:
389 CEPH_BUILD_DIR=os.getcwd()
390 os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
391 CEPH_BIN=os.path.join(CEPH_BUILD_DIR, 'bin')
392 os.putenv('CEPH_BIN', CEPH_BIN)
393 CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR)
394 os.putenv('CEPH_ROOT', CEPH_ROOT)
395 CEPH_LIB=os.path.join(CEPH_BUILD_DIR, 'lib')
396 os.putenv('CEPH_LIB', CEPH_LIB)
397
398 try:
399 os.mkdir("td")
400 except:
401 pass # ok if this is already there
402 CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
403 CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
404
405 def kill_daemons():
406 call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True)
407
408
409 def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
410 repcount = 0
411 ERRORS = 0
412 for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
413 nsfile = rawnsfile.split("__")[0]
414 clone = rawnsfile.split("__")[1]
415 nspace = nsfile.split("-")[0]
416 file = nsfile.split("-")[1] + "__" + clone
417 # Skip clones
418 if clone != "head":
419 continue
420 path = os.path.join(DATADIR, rawnsfile)
421 tmpfd = open(TMPFILE, "wb")
422 cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
423 logging.debug(cmd)
424 ret = call(cmd, shell=True, stdout=tmpfd)
425 if ret:
426 logging.critical("INTERNAL ERROR")
427 return 1
428 tmpfd.close()
429 obj_locs = get_lines(TMPFILE)
430 if len(obj_locs) == 0:
431 logging.error("Can't find imported object {name}".format(name=file))
432 ERRORS += 1
433 for obj_loc in obj_locs:
434 # For btrfs skip snap_* dirs
435 if re.search("/snap_[0-9]*/", obj_loc) is not None:
436 continue
437 repcount += 1
438 cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
439 logging.debug(cmd)
440 ret = call(cmd, shell=True)
441 if ret != 0:
442 logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
443 ERRORS += 1
444 return ERRORS, repcount
445
446
447 def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
448 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
449 osdmap_file = tempfile.NamedTemporaryFile(delete=True)
450 cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
451 osdmap_file=osdmap_file.name)
452 output = check_output(cmd, shell=True)
453 epoch = int(re.findall('#(\d+)', output)[0])
454
455 new_crush_file = tempfile.NamedTemporaryFile(delete=True)
456 old_crush_file = tempfile.NamedTemporaryFile(delete=True)
457 ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
458 crush_file=old_crush_file.name, path=CEPH_BIN),
459 stdout=DEVNULL,
460 stderr=DEVNULL,
461 shell=True)
462 assert(ret == 0)
463
464 for osd_id in osd_ids:
465 cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
466 crush_file=old_crush_file.name,
467 weight=weight,
468 new_crush_file=new_crush_file.name, path=CEPH_BIN)
469 ret = call(cmd, stdout=DEVNULL, shell=True)
470 assert(ret == 0)
471 old_crush_file, new_crush_file = new_crush_file, old_crush_file
472
473 # change them back, since we don't need to preapre for another round
474 old_crush_file, new_crush_file = new_crush_file, old_crush_file
475 old_crush_file.close()
476
477 ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
478 crush_file=new_crush_file.name, path=CEPH_BIN),
479 stdout=DEVNULL,
480 stderr=DEVNULL,
481 shell=True)
482 assert(ret == 0)
483
484 # Minimum test of --dry-run by using it, but not checking anything
485 cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
486 cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
487 ret = call(cmd, stdout=DEVNULL, shell=True)
488 assert(ret == 0)
489
490 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
491 # to use use a different epoch than the one in osdmap
492 cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
493 cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
494 ret = call(cmd, stdout=DEVNULL, shell=True)
495
496 return ret == 0
497
498 def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
499 osdmap_file = tempfile.NamedTemporaryFile(delete=True)
500 cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
501 osdmap_file=osdmap_file.name)
502 ret = call(cmd, stdout=DEVNULL, shell=True)
503 if ret != 0:
504 return None
505 # we have to read the weights from the crush map, even we can query the weights using
506 # osdmaptool, but please keep in mind, they are different:
507 # item weights in crush map versus weight associated with each osd in osdmap
508 crush_file = tempfile.NamedTemporaryFile(delete=True)
509 ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
510 crush_file=crush_file.name, path=CEPH_BIN),
511 stdout=DEVNULL,
512 shell=True)
513 assert(ret == 0)
514 output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
515 num_osd=len(osd_ids), path=CEPH_BIN),
516 stderr=DEVNULL,
517 shell=True)
518 weights = []
519 for line in output.strip().split('\n'):
520 print(line)
521 linev = re.split('\s+', line)
522 if linev[0] is '':
523 linev.pop(0)
524 print('linev %s' % linev)
525 weights.append(float(linev[2]))
526
527 return weights
528
529
530 def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
531 print("Testing get-osdmap and set-osdmap")
532 errors = 0
533 kill_daemons()
534 weight = 1 / math.e # just some magic number in [0, 1]
535 changed = []
536 for osd_path in osd_paths:
537 if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
538 changed.append(osd_path)
539 else:
540 logging.warning("Failed to change the weights: {0}".format(osd_path))
541 # i am pissed off if none of the store gets changed
542 if not changed:
543 errors += 1
544
545 for osd_path in changed:
546 weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path)
547 if not weights:
548 errors += 1
549 continue
550 if any(abs(w - weight) > 1e-5 for w in weights):
551 logging.warning("Weight is not changed: {0} != {1}".format(weights, weight))
552 errors += 1
553 return errors
554
555 def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
556 # incrementals are not used unless we need to build an MOSDMap to update
557 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
558 # with a different copy, and read it back to see if it matches.
559 kill_daemons()
560 file_e2 = tempfile.NamedTemporaryFile(delete=True)
561 cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
562 file=file_e2.name)
563 output = check_output(cmd, shell=True)
564 epoch = int(re.findall('#(\d+)', output)[0])
565 # backup e1 incremental before overwriting it
566 epoch -= 1
567 file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
568 cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
569 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
570 if ret: return 1
571 # overwrite e1 with e2
572 cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
573 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
574 if ret: return 1
575 # Use dry-run to set back to e1 which shouldn't happen
576 cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
577 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
578 if ret: return 1
579 # read from e1
580 file_e1_read = tempfile.NamedTemporaryFile(delete=True)
581 cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
582 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
583 if ret: return 1
584 errors = 0
585 try:
586 if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
587 logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
588 errors += 1
589 finally:
590 # revert the change with file_e1_backup
591 cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
592 ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
593 if ret:
594 logging.error("Failed to revert the changed inc-osdmap")
595 errors += 1
596
597 return errors
598
599
600 def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
601 # Test removeall
602 TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
603 nullfd = open(os.devnull, "w")
604 errors=0
605 print("Test removeall")
606 kill_daemons()
607 test_force_remove = 0
608 for nspace in db.keys():
609 for basename in db[nspace].keys():
610 JSON = db[nspace][basename]['json']
611 for pg in OBJREPPGS:
612 OSDS = get_osds(pg, OSDDIR)
613 for osd in OSDS:
614 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
615 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
616 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
617 if not fnames:
618 continue
619
620 if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
621 cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
622 errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")
623 if not test_force_remove:
624
625 cmd = (CFSD_PREFIX + " '{json}' set-attr snapset /dev/null").format(osd=osd, json=JSON)
626 logging.debug(cmd)
627 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
628 if ret != 0:
629 logging.error("Test set-up to corrupt snapset failed for {json}".format(json=JSON))
630 errors += 1
631 # Do the removeall since this test failed to set-up
632 else:
633 test_force_remove = 1
634
635 cmd = (CFSD_PREFIX + " '{json}' --force remove").format(osd=osd, json=JSON)
636 logging.debug(cmd)
637 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
638 if ret != 0:
639 logging.error("forced remove with corrupt snapset failed for {json}".format(json=JSON))
640 errors += 1
641 continue
642
643 cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
644 logging.debug(cmd)
645 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
646 if ret != 0:
647 logging.error("remove with --force failed for {json}".format(json=JSON))
648 errors += 1
649
650 cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
651 logging.debug(cmd)
652 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
653 if ret != 0:
654 logging.error("removeall failed for {json}".format(json=JSON))
655 errors += 1
656
657 cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
658 logging.debug(cmd)
659 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
660 if ret != 0:
661 logging.error("removeall failed for {json}".format(json=JSON))
662 errors += 1
663
664 tmpfd = open(TMPFILE, "w")
665 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
666 logging.debug(cmd)
667 ret = call(cmd, shell=True, stdout=tmpfd)
668 if ret != 0:
669 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
670 errors += 1
671 tmpfd.close()
672 lines = get_lines(TMPFILE)
673 if len(lines) != 0:
674 logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
675 errors += 1
676 vstart(new=False)
677 wait_for_health()
678 cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
679 logging.debug(cmd)
680 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
681 if ret != 0:
682 logging.error("rados rmsnap failed")
683 errors += 1
684 time.sleep(2)
685 wait_for_health()
686 return errors
687
688
689 def main(argv):
690 if sys.version_info[0] < 3:
691 sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
692 else:
693 stdout = sys.stdout.buffer
694 if len(argv) > 1 and argv[1] == "debug":
695 nullfd = stdout
696 else:
697 nullfd = DEVNULL
698
699 call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
700 os.chdir(CEPH_DIR)
701 os.environ["CEPH_DIR"] = CEPH_DIR
702 OSDDIR = "dev"
703 REP_POOL = "rep_pool"
704 REP_NAME = "REPobject"
705 EC_POOL = "ec_pool"
706 EC_NAME = "ECobject"
707 if len(argv) > 0 and argv[0] == 'large':
708 PG_COUNT = 12
709 NUM_REP_OBJECTS = 200
710 NUM_CLONED_REP_OBJECTS = 50
711 NUM_EC_OBJECTS = 12
712 NUM_NSPACES = 4
713 # Larger data sets for first object per namespace
714 DATALINECOUNT = 50000
715 # Number of objects to do xattr/omap testing on
716 ATTR_OBJS = 10
717 else:
718 PG_COUNT = 4
719 NUM_REP_OBJECTS = 2
720 NUM_CLONED_REP_OBJECTS = 2
721 NUM_EC_OBJECTS = 2
722 NUM_NSPACES = 2
723 # Larger data sets for first object per namespace
724 DATALINECOUNT = 10
725 # Number of objects to do xattr/omap testing on
726 ATTR_OBJS = 2
727 ERRORS = 0
728 pid = os.getpid()
729 TESTDIR = "/tmp/test.{pid}".format(pid=pid)
730 DATADIR = "/tmp/data.{pid}".format(pid=pid)
731 CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} "
732 PROFNAME = "testecprofile"
733
734 os.environ['CEPH_CONF'] = CEPH_CONF
735 vstart(new=True)
736 wait_for_health()
737
738 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
739 logging.debug(cmd)
740 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
741 time.sleep(2)
742 REPID = get_pool_id(REP_POOL, nullfd)
743
744 print("Created Replicated pool #{repid}".format(repid=REPID))
745
746 cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
747 logging.debug(cmd)
748 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
749 cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
750 logging.debug(cmd)
751 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
752 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
753 logging.debug(cmd)
754 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
755 ECID = get_pool_id(EC_POOL, nullfd)
756
757 print("Created Erasure coded pool #{ecid}".format(ecid=ECID))
758
759 print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
760 cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
761 logging.debug(cmd)
762 call(cmd, shell=True)
763
764 db = {}
765
766 objects = range(1, NUM_REP_OBJECTS + 1)
767 nspaces = range(NUM_NSPACES)
768 for n in nspaces:
769 nspace = get_nspace(n)
770
771 db[nspace] = {}
772
773 for i in objects:
774 NAME = REP_NAME + "{num}".format(num=i)
775 LNAME = nspace + "-" + NAME
776 DDNAME = os.path.join(DATADIR, LNAME)
777 DDNAME += "__head"
778
779 cmd = "rm -f " + DDNAME
780 logging.debug(cmd)
781 call(cmd, shell=True)
782
783 if i == 1:
784 dataline = range(DATALINECOUNT)
785 else:
786 dataline = range(1)
787 fd = open(DDNAME, "w")
788 data = "This is the replicated data for " + LNAME + "\n"
789 for _ in dataline:
790 fd.write(data)
791 fd.close()
792
793 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
794 logging.debug(cmd)
795 ret = call(cmd, shell=True, stderr=nullfd)
796 if ret != 0:
797 logging.critical("Rados put command failed with {ret}".format(ret=ret))
798 return 1
799
800 db[nspace][NAME] = {}
801
802 if i < ATTR_OBJS + 1:
803 keys = range(i)
804 else:
805 keys = range(0)
806 db[nspace][NAME]["xattr"] = {}
807 for k in keys:
808 if k == 0:
809 continue
810 mykey = "key{i}-{k}".format(i=i, k=k)
811 myval = "val{i}-{k}".format(i=i, k=k)
812 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
813 logging.debug(cmd)
814 ret = call(cmd, shell=True)
815 if ret != 0:
816 logging.error("setxattr failed with {ret}".format(ret=ret))
817 ERRORS += 1
818 db[nspace][NAME]["xattr"][mykey] = myval
819
820 # Create omap header in all objects but REPobject1
821 if i < ATTR_OBJS + 1 and i != 1:
822 myhdr = "hdr{i}".format(i=i)
823 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
824 logging.debug(cmd)
825 ret = call(cmd, shell=True)
826 if ret != 0:
827 logging.critical("setomapheader failed with {ret}".format(ret=ret))
828 ERRORS += 1
829 db[nspace][NAME]["omapheader"] = myhdr
830
831 db[nspace][NAME]["omap"] = {}
832 for k in keys:
833 if k == 0:
834 continue
835 mykey = "okey{i}-{k}".format(i=i, k=k)
836 myval = "oval{i}-{k}".format(i=i, k=k)
837 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
838 logging.debug(cmd)
839 ret = call(cmd, shell=True)
840 if ret != 0:
841 logging.critical("setomapval failed with {ret}".format(ret=ret))
842 db[nspace][NAME]["omap"][mykey] = myval
843
844 # Create some clones
845 cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
846 logging.debug(cmd)
847 call(cmd, shell=True)
848
849 objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
850 nspaces = range(NUM_NSPACES)
851 for n in nspaces:
852 nspace = get_nspace(n)
853
854 for i in objects:
855 NAME = REP_NAME + "{num}".format(num=i)
856 LNAME = nspace + "-" + NAME
857 DDNAME = os.path.join(DATADIR, LNAME)
858 # First clone
859 CLONENAME = DDNAME + "__1"
860 DDNAME += "__head"
861
862 cmd = "mv -f " + DDNAME + " " + CLONENAME
863 logging.debug(cmd)
864 call(cmd, shell=True)
865
866 if i == 1:
867 dataline = range(DATALINECOUNT)
868 else:
869 dataline = range(1)
870 fd = open(DDNAME, "w")
871 data = "This is the replicated data after a snapshot for " + LNAME + "\n"
872 for _ in dataline:
873 fd.write(data)
874 fd.close()
875
876 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
877 logging.debug(cmd)
878 ret = call(cmd, shell=True, stderr=nullfd)
879 if ret != 0:
880 logging.critical("Rados put command failed with {ret}".format(ret=ret))
881 return 1
882
883 print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))
884
885 objects = range(1, NUM_EC_OBJECTS + 1)
886 nspaces = range(NUM_NSPACES)
887 for n in nspaces:
888 nspace = get_nspace(n)
889
890 for i in objects:
891 NAME = EC_NAME + "{num}".format(num=i)
892 LNAME = nspace + "-" + NAME
893 DDNAME = os.path.join(DATADIR, LNAME)
894 DDNAME += "__head"
895
896 cmd = "rm -f " + DDNAME
897 logging.debug(cmd)
898 call(cmd, shell=True)
899
900 if i == 1:
901 dataline = range(DATALINECOUNT)
902 else:
903 dataline = range(1)
904 fd = open(DDNAME, "w")
905 data = "This is the erasure coded data for " + LNAME + "\n"
906 for j in dataline:
907 fd.write(data)
908 fd.close()
909
910 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
911 logging.debug(cmd)
912 ret = call(cmd, shell=True, stderr=nullfd)
913 if ret != 0:
914 logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret))
915 return 1
916
917 db[nspace][NAME] = {}
918
919 db[nspace][NAME]["xattr"] = {}
920 if i < ATTR_OBJS + 1:
921 keys = range(i)
922 else:
923 keys = range(0)
924 for k in keys:
925 if k == 0:
926 continue
927 mykey = "key{i}-{k}".format(i=i, k=k)
928 myval = "val{i}-{k}".format(i=i, k=k)
929 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
930 logging.debug(cmd)
931 ret = call(cmd, shell=True)
932 if ret != 0:
933 logging.error("setxattr failed with {ret}".format(ret=ret))
934 ERRORS += 1
935 db[nspace][NAME]["xattr"][mykey] = myval
936
937 # Omap isn't supported in EC pools
938 db[nspace][NAME]["omap"] = {}
939
940 logging.debug(db)
941
942 kill_daemons()
943
944 if ERRORS:
945 logging.critical("Unable to set up test")
946 return 1
947
948 ALLREPPGS = get_pgs(OSDDIR, REPID)
949 logging.debug(ALLREPPGS)
950 ALLECPGS = get_pgs(OSDDIR, ECID)
951 logging.debug(ALLECPGS)
952
953 OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
954 logging.debug(OBJREPPGS)
955 OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
956 logging.debug(OBJECPGS)
957
958 ONEPG = ALLREPPGS[0]
959 logging.debug(ONEPG)
960 osds = get_osds(ONEPG, OSDDIR)
961 ONEOSD = osds[0]
962 logging.debug(ONEOSD)
963
964 print("Test invalid parameters")
965 # On export can't use stdout to a terminal
966 cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
967 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
968
969 # On export can't use stdout to a terminal
970 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
971 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
972
973 # Prep a valid ec export file for import failure tests
974 ONEECPG = ALLECPGS[0]
975 osds = get_osds(ONEECPG, OSDDIR)
976 ONEECOSD = osds[0]
977 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
978 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
979 logging.debug(cmd)
980 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
981
982 os.unlink(OTHERFILE)
983
984 # Prep a valid export file for import failure tests
985 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
986 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
987 logging.debug(cmd)
988 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
989
990 # On import can't specify a different pgid than the file
991 TMPPG="{pool}.80".format(pool=REPID)
992 cmd = (CFSD_PREFIX + "--op import --pgid 12.dd --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
993 ERRORS += test_failure(cmd, "specified pgid 12.dd does not match actual pgid")
994
995 os.unlink(OTHERFILE)
996 cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
997 ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))
998
999 cmd = "{path}/ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
1000 ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")
1001
1002 cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
1003 ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")
1004
1005 cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
1006 ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")
1007
1008 # On import can't use stdin from a terminal
1009 cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
1010 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
1011
1012 # On import can't use stdin from a terminal
1013 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
1014 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
1015
1016 # Specify a bad --type
1017 os.mkdir(OSDDIR + "/fakeosd")
1018 cmd = ("{path}/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
1019 ERRORS += test_failure(cmd, "Unable to create store of type foobar")
1020
1021 # Don't specify a data-path
1022 cmd = "{path}/ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
1023 ERRORS += test_failure(cmd, "Must provide --data-path")
1024
1025 cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
1026 ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")
1027
1028 cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
1029 ERRORS += test_failure(cmd, "Must provide pgid")
1030
1031 # Don't secify a --op nor object command
1032 cmd = CFSD_PREFIX.format(osd=ONEOSD)
1033 ERRORS += test_failure(cmd, "Must provide --op or object command...")
1034
1035 # Specify a bad --op command
1036 cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
1037 ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log)")
1038
1039 # Provide just the object param not a command
1040 cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
1041 ERRORS += test_failure(cmd, "Invalid syntax, missing command")
1042
1043 # Provide an object name that doesn't exist
1044 cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
1045 ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")
1046
1047 # Provide an invalid object command
1048 cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
1049 ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")
1050
1051 cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
1052 ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")
1053
1054 cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
1055 ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")
1056
1057 cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1058 ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")
1059
1060 cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1061 ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")
1062
1063 cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1064 ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")
1065
1066 cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1067 ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")
1068
1069 cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1070 ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")
1071
1072 TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
1073 ALLPGS = OBJREPPGS + OBJECPGS
1074 OSDS = get_osds(ALLPGS[0], OSDDIR)
1075 osd = OSDS[0]
1076
1077 print("Test all --op dump-journal")
1078 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1079 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1080
1081 # Test --op list and generate json for all objects
1082 print("Test --op list variants")
1083
1084 # retrieve all objects from all PGs
1085 tmpfd = open(TMPFILE, "wb")
1086 cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
1087 logging.debug(cmd)
1088 ret = call(cmd, shell=True, stdout=tmpfd)
1089 if ret != 0:
1090 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1091 ERRORS += 1
1092 tmpfd.close()
1093 lines = get_lines(TMPFILE)
1094 JSONOBJ = sorted(set(lines))
1095 (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]
1096
1097 # retrieve all objects in a given PG
1098 tmpfd = open(OTHERFILE, "ab")
1099 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
1100 logging.debug(cmd)
1101 ret = call(cmd, shell=True, stdout=tmpfd)
1102 if ret != 0:
1103 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1104 ERRORS += 1
1105 tmpfd.close()
1106 lines = get_lines(OTHERFILE)
1107 JSONOBJ = sorted(set(lines))
1108 (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]
1109
1110 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1111 logging.error("the first line of --op list is different "
1112 "from the first line of --op list --pgid {pg}".format(pg=pgid))
1113 ERRORS += 1
1114
1115 # retrieve all objects with a given name in a given PG
1116 tmpfd = open(OTHERFILE, "wb")
1117 cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
1118 logging.debug(cmd)
1119 ret = call(cmd, shell=True, stdout=tmpfd)
1120 if ret != 0:
1121 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1122 ERRORS += 1
1123 tmpfd.close()
1124 lines = get_lines(OTHERFILE)
1125 JSONOBJ = sorted(set(lines))
1126 (other_pgid, other_coll, other_jsondict) in json.loads(JSONOBJ[0])[0]
1127
1128 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1129 logging.error("the first line of --op list is different "
1130 "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
1131 ERRORS += 1
1132
1133 print("Test --op list by generating json for all objects using default format")
1134 for pg in ALLPGS:
1135 OSDS = get_osds(pg, OSDDIR)
1136 for osd in OSDS:
1137 tmpfd = open(TMPFILE, "ab")
1138 cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
1139 logging.debug(cmd)
1140 ret = call(cmd, shell=True, stdout=tmpfd)
1141 if ret != 0:
1142 logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
1143 ERRORS += 1
1144
1145 tmpfd.close()
1146 lines = get_lines(TMPFILE)
1147 JSONOBJ = sorted(set(lines))
1148 for JSON in JSONOBJ:
1149 (pgid, jsondict) = json.loads(JSON)
1150 # Skip clones for now
1151 if jsondict['snapid'] != -2:
1152 continue
1153 db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
1154 # print db[jsondict['namespace']][jsondict['oid']]['json']
1155 if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
1156 logging.error("Malformed JSON {json}".format(json=JSON))
1157 ERRORS += 1
1158
1159 # Test get-bytes
1160 print("Test get-bytes and set-bytes")
1161 for nspace in db.keys():
1162 for basename in db[nspace].keys():
1163 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1164 JSON = db[nspace][basename]['json']
1165 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1166 TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
1167 SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
1168 BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
1169 for pg in OBJREPPGS:
1170 OSDS = get_osds(pg, OSDDIR)
1171 for osd in OSDS:
1172 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1173 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1174 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1175 if not fnames:
1176 continue
1177 try:
1178 os.unlink(GETNAME)
1179 except:
1180 pass
1181 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
1182 logging.debug(cmd)
1183 ret = call(cmd, shell=True)
1184 if ret != 0:
1185 logging.error("Bad exit status {ret}".format(ret=ret))
1186 ERRORS += 1
1187 continue
1188 cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
1189 ret = call(cmd, shell=True)
1190 if ret != 0:
1191 logging.error("Data from get-bytes differ")
1192 logging.debug("Got:")
1193 cat_file(logging.DEBUG, GETNAME)
1194 logging.debug("Expected:")
1195 cat_file(logging.DEBUG, file)
1196 ERRORS += 1
1197 fd = open(SETNAME, "w")
1198 data = "put-bytes going into {file}\n".format(file=file)
1199 fd.write(data)
1200 fd.close()
1201 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
1202 logging.debug(cmd)
1203 ret = call(cmd, shell=True)
1204 if ret != 0:
1205 logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
1206 ERRORS += 1
1207 fd = open(TESTNAME, "wb")
1208 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1209 logging.debug(cmd)
1210 ret = call(cmd, shell=True, stdout=fd)
1211 fd.close()
1212 if ret != 0:
1213 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1214 ERRORS += 1
1215 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1216 logging.debug(cmd)
1217 ret = call(cmd, shell=True)
1218 if ret != 0:
1219 logging.error("Data after set-bytes differ")
1220 logging.debug("Got:")
1221 cat_file(logging.DEBUG, TESTNAME)
1222 logging.debug("Expected:")
1223 cat_file(logging.DEBUG, SETNAME)
1224 ERRORS += 1
1225
1226 # Use set-bytes with --dry-run and make sure contents haven't changed
1227 fd = open(BADNAME, "w")
1228 data = "Bad data for --dry-run in {file}\n".format(file=file)
1229 fd.write(data)
1230 fd.close()
1231 cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
1232 logging.debug(cmd)
1233 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1234 if ret != 0:
1235 logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
1236 ERRORS += 1
1237 fd = open(TESTNAME, "wb")
1238 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1239 logging.debug(cmd)
1240 ret = call(cmd, shell=True, stdout=fd)
1241 fd.close()
1242 if ret != 0:
1243 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1244 ERRORS += 1
1245 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1246 logging.debug(cmd)
1247 ret = call(cmd, shell=True)
1248 if ret != 0:
1249 logging.error("Data after set-bytes --dry-run changed!")
1250 logging.debug("Got:")
1251 cat_file(logging.DEBUG, TESTNAME)
1252 logging.debug("Expected:")
1253 cat_file(logging.DEBUG, SETNAME)
1254 ERRORS += 1
1255
1256 fd = open(file, "rb")
1257 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
1258 logging.debug(cmd)
1259 ret = call(cmd, shell=True, stdin=fd)
1260 if ret != 0:
1261 logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
1262 ERRORS += 1
1263 fd.close()
1264
1265 try:
1266 os.unlink(GETNAME)
1267 except:
1268 pass
1269 try:
1270 os.unlink(TESTNAME)
1271 except:
1272 pass
1273 try:
1274 os.unlink(SETNAME)
1275 except:
1276 pass
1277 try:
1278 os.unlink(BADNAME)
1279 except:
1280 pass
1281
1282 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1283 print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
1284 for nspace in db.keys():
1285 for basename in db[nspace].keys():
1286 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1287 JSON = db[nspace][basename]['json']
1288 for pg in OBJREPPGS:
1289 OSDS = get_osds(pg, OSDDIR)
1290 for osd in OSDS:
1291 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1292 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1293 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1294 if not fnames:
1295 continue
1296 for key, val in db[nspace][basename]["xattr"].items():
1297 attrkey = "_" + key
1298 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
1299 logging.debug(cmd)
1300 getval = check_output(cmd, shell=True)
1301 if getval != val:
1302 logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
1303 ERRORS += 1
1304 continue
1305 # set-attr to bogus value "foobar"
1306 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1307 logging.debug(cmd)
1308 ret = call(cmd, shell=True)
1309 if ret != 0:
1310 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1311 ERRORS += 1
1312 continue
1313 # Test set-attr with dry-run
1314 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1315 logging.debug(cmd)
1316 ret = call(cmd, shell=True, stdout=nullfd)
1317 if ret != 0:
1318 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1319 ERRORS += 1
1320 continue
1321 # Check the set-attr
1322 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1323 logging.debug(cmd)
1324 getval = check_output(cmd, shell=True)
1325 if ret != 0:
1326 logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
1327 ERRORS += 1
1328 continue
1329 if getval != "foobar":
1330 logging.error("Check of set-attr failed because we got {val}".format(val=getval))
1331 ERRORS += 1
1332 continue
1333 # Test rm-attr
1334 cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1335 logging.debug(cmd)
1336 ret = call(cmd, shell=True)
1337 if ret != 0:
1338 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1339 ERRORS += 1
1340 continue
1341 # Check rm-attr with dry-run
1342 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1343 logging.debug(cmd)
1344 ret = call(cmd, shell=True, stdout=nullfd)
1345 if ret != 0:
1346 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1347 ERRORS += 1
1348 continue
1349 cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1350 logging.debug(cmd)
1351 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1352 if ret == 0:
1353 logging.error("For rm-attr expect get-attr to fail, but it succeeded")
1354 ERRORS += 1
1355 # Put back value
1356 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
1357 logging.debug(cmd)
1358 ret = call(cmd, shell=True)
1359 if ret != 0:
1360 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1361 ERRORS += 1
1362 continue
1363
1364 hdr = db[nspace][basename].get("omapheader", "")
1365 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1366 logging.debug(cmd)
1367 gethdr = check_output(cmd, shell=True)
1368 if gethdr != hdr:
1369 logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1370 ERRORS += 1
1371 continue
1372 # set-omaphdr to bogus value "foobar"
1373 cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1374 logging.debug(cmd)
1375 ret = call(cmd, shell=True)
1376 if ret != 0:
1377 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1378 ERRORS += 1
1379 continue
1380 # Check the set-omaphdr
1381 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1382 logging.debug(cmd)
1383 gethdr = check_output(cmd, shell=True)
1384 if ret != 0:
1385 logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1386 ERRORS += 1
1387 continue
1388 if gethdr != "foobar":
1389 logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1390 ERRORS += 1
1391 continue
1392 # Test dry-run with set-omaphdr
1393 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1394 logging.debug(cmd)
1395 ret = call(cmd, shell=True, stdout=nullfd)
1396 if ret != 0:
1397 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1398 ERRORS += 1
1399 continue
1400 # Put back value
1401 cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1402 logging.debug(cmd)
1403 ret = call(cmd, shell=True)
1404 if ret != 0:
1405 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1406 ERRORS += 1
1407 continue
1408
1409 for omapkey, val in db[nspace][basename]["omap"].items():
1410 cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
1411 logging.debug(cmd)
1412 getval = check_output(cmd, shell=True)
1413 if getval != val:
1414 logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
1415 ERRORS += 1
1416 continue
1417 # set-omap to bogus value "foobar"
1418 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1419 logging.debug(cmd)
1420 ret = call(cmd, shell=True)
1421 if ret != 0:
1422 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1423 ERRORS += 1
1424 continue
1425 # Check set-omap with dry-run
1426 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1427 logging.debug(cmd)
1428 ret = call(cmd, shell=True, stdout=nullfd)
1429 if ret != 0:
1430 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1431 ERRORS += 1
1432 continue
1433 # Check the set-omap
1434 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1435 logging.debug(cmd)
1436 getval = check_output(cmd, shell=True)
1437 if ret != 0:
1438 logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
1439 ERRORS += 1
1440 continue
1441 if getval != "foobar":
1442 logging.error("Check of set-omap failed because we got {val}".format(val=getval))
1443 ERRORS += 1
1444 continue
1445 # Test rm-omap
1446 cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1447 logging.debug(cmd)
1448 ret = call(cmd, shell=True)
1449 if ret != 0:
1450 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1451 ERRORS += 1
1452 # Check rm-omap with dry-run
1453 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1454 logging.debug(cmd)
1455 ret = call(cmd, shell=True, stdout=nullfd)
1456 if ret != 0:
1457 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1458 ERRORS += 1
1459 cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1460 logging.debug(cmd)
1461 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1462 if ret == 0:
1463 logging.error("For rm-omap expect get-omap to fail, but it succeeded")
1464 ERRORS += 1
1465 # Put back value
1466 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
1467 logging.debug(cmd)
1468 ret = call(cmd, shell=True)
1469 if ret != 0:
1470 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1471 ERRORS += 1
1472 continue
1473
1474 # Test dump
1475 print("Test dump")
1476 for nspace in db.keys():
1477 for basename in db[nspace].keys():
1478 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1479 JSON = db[nspace][basename]['json']
1480 jsondict = json.loads(JSON)
1481 for pg in OBJREPPGS:
1482 OSDS = get_osds(pg, OSDDIR)
1483 for osd in OSDS:
1484 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1485 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1486 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1487 if not fnames:
1488 continue
1489 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1490 continue
1491 logging.debug("REPobject " + JSON)
1492 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1493 logging.debug(cmd)
1494 ret = call(cmd, shell=True)
1495 if ret != 0:
1496 logging.error("Invalid dump for {json}".format(json=JSON))
1497 ERRORS += 1
1498 if 'shard_id' in jsondict[1]:
1499 logging.debug("ECobject " + JSON)
1500 for pg in OBJECPGS:
1501 OSDS = get_osds(pg, OSDDIR)
1502 jsondict = json.loads(JSON)
1503 for osd in OSDS:
1504 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1505 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1506 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1507 if not fnames:
1508 continue
1509 if int(basename.split(EC_NAME)[1]) > int(NUM_EC_OBJECTS):
1510 continue
1511 # Fix shard_id since we only have one json instance for each object
1512 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1513 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"hinfo\": [{{]' > /dev/null").format(osd=osd, json=json.dumps((pg, jsondict[1])))
1514 logging.debug(cmd)
1515 ret = call(cmd, shell=True)
1516 if ret != 0:
1517 logging.error("Invalid dump for {json}".format(json=JSON))
1518
1519 print("Test list-attrs get-attr")
1520 ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
1521 VALFILE = r"/tmp/val.{pid}".format(pid=pid)
1522 for nspace in db.keys():
1523 for basename in db[nspace].keys():
1524 file = os.path.join(DATADIR, nspace + "-" + basename)
1525 JSON = db[nspace][basename]['json']
1526 jsondict = json.loads(JSON)
1527
1528 if 'shard_id' in jsondict[1]:
1529 logging.debug("ECobject " + JSON)
1530 found = 0
1531 for pg in OBJECPGS:
1532 OSDS = get_osds(pg, OSDDIR)
1533 # Fix shard_id since we only have one json instance for each object
1534 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1535 JSON = json.dumps((pg, jsondict[1]))
1536 for osd in OSDS:
1537 cmd = (CFSD_PREFIX + " '{json}' get-attr hinfo_key").format(osd=osd, json=JSON)
1538 logging.debug("TRY: " + cmd)
1539 try:
1540 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1541 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1542 found += 1
1543 except subprocess.CalledProcessError as e:
1544 if "No such file or directory" not in e.output and "No data available" not in e.output:
1545 raise
1546 # Assuming k=2 m=1 for the default ec pool
1547 if found != 3:
1548 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1549 ERRORS += 1
1550
1551 for pg in ALLPGS:
1552 # Make sure rep obj with rep pg or ec obj with ec pg
1553 if ('shard_id' in jsondict[1]) != (pg.find('s') > 0):
1554 continue
1555 if 'shard_id' in jsondict[1]:
1556 # Fix shard_id since we only have one json instance for each object
1557 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1558 JSON = json.dumps((pg, jsondict[1]))
1559 OSDS = get_osds(pg, OSDDIR)
1560 for osd in OSDS:
1561 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1562 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1563 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1564 if not fnames:
1565 continue
1566 afd = open(ATTRFILE, "wb")
1567 cmd = (CFSD_PREFIX + " '{json}' list-attrs").format(osd=osd, json=JSON)
1568 logging.debug(cmd)
1569 ret = call(cmd, shell=True, stdout=afd)
1570 afd.close()
1571 if ret != 0:
1572 logging.error("list-attrs failed with {ret}".format(ret=ret))
1573 ERRORS += 1
1574 continue
1575 keys = get_lines(ATTRFILE)
1576 values = dict(db[nspace][basename]["xattr"])
1577 for key in keys:
1578 if key == "_" or key == "snapset" or key == "hinfo_key":
1579 continue
1580 key = key.strip("_")
1581 if key not in values:
1582 logging.error("Unexpected key {key} present".format(key=key))
1583 ERRORS += 1
1584 continue
1585 exp = values.pop(key)
1586 vfd = open(VALFILE, "wb")
1587 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key="_" + key)
1588 logging.debug(cmd)
1589 ret = call(cmd, shell=True, stdout=vfd)
1590 vfd.close()
1591 if ret != 0:
1592 logging.error("get-attr failed with {ret}".format(ret=ret))
1593 ERRORS += 1
1594 continue
1595 lines = get_lines(VALFILE)
1596 val = lines[0]
1597 if exp != val:
1598 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1599 ERRORS += 1
1600 if len(values) != 0:
1601 logging.error("Not all keys found, remaining keys:")
1602 print(values)
1603
1604 print("Test --op meta-list")
1605 tmpfd = open(TMPFILE, "wb")
1606 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1607 logging.debug(cmd)
1608 ret = call(cmd, shell=True, stdout=tmpfd)
1609 if ret != 0:
1610 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1611 ERRORS += 1
1612
1613 print("Test get-bytes on meta")
1614 tmpfd.close()
1615 lines = get_lines(TMPFILE)
1616 JSONOBJ = sorted(set(lines))
1617 for JSON in JSONOBJ:
1618 (pgid, jsondict) = json.loads(JSON)
1619 if pgid != "meta":
1620 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1621 ERRORS += 1
1622 if jsondict['namespace'] != "":
1623 logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
1624 ERRORS += 1
1625 logging.info(JSON)
1626 try:
1627 os.unlink(GETNAME)
1628 except:
1629 pass
1630 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1631 logging.debug(cmd)
1632 ret = call(cmd, shell=True)
1633 if ret != 0:
1634 logging.error("Bad exit status {ret}".format(ret=ret))
1635 ERRORS += 1
1636
1637 try:
1638 os.unlink(GETNAME)
1639 except:
1640 pass
1641 try:
1642 os.unlink(TESTNAME)
1643 except:
1644 pass
1645
1646 print("Test pg info")
1647 for pg in ALLREPPGS + ALLECPGS:
1648 for osd in get_osds(pg, OSDDIR):
1649 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1650 logging.debug(cmd)
1651 ret = call(cmd, shell=True, stdout=nullfd)
1652 if ret != 0:
1653 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1654 ERRORS += 1
1655
1656 print("Test pg logging")
1657 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
1658 logging.warning("All PGs have objects, so no log without modify entries")
1659 for pg in ALLREPPGS + ALLECPGS:
1660 for osd in get_osds(pg, OSDDIR):
1661 tmpfd = open(TMPFILE, "wb")
1662 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1663 logging.debug(cmd)
1664 ret = call(cmd, shell=True, stdout=tmpfd)
1665 if ret != 0:
1666 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1667 ERRORS += 1
1668 HASOBJ = pg in OBJREPPGS + OBJECPGS
1669 MODOBJ = False
1670 for line in get_lines(TMPFILE):
1671 if line.find("modify") != -1:
1672 MODOBJ = True
1673 break
1674 if HASOBJ != MODOBJ:
1675 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
1676 MSG = (HASOBJ and [""] or ["NOT "])[0]
1677 print("Log should {msg}have a modify entry".format(msg=MSG))
1678 ERRORS += 1
1679
1680 try:
1681 os.unlink(TMPFILE)
1682 except:
1683 pass
1684
1685 print("Test list-pgs")
1686 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1687
1688 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1689 CHECK_PGS = sorted(CHECK_PGS)
1690
1691 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1692 logging.debug(cmd)
1693 TEST_PGS = check_output(cmd, shell=True).split("\n")
1694 TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line
1695
1696 if TEST_PGS != CHECK_PGS:
1697 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1698 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1699 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1700 ERRORS += 1
1701
1702 EXP_ERRORS = 0
1703 print("Test pg export --dry-run")
1704 pg = ALLREPPGS[0]
1705 osd = get_osds(pg, OSDDIR)[0]
1706 fname = "/tmp/fname.{pid}".format(pid=pid)
1707 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1708 logging.debug(cmd)
1709 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1710 if ret != 0:
1711 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1712 EXP_ERRORS += 1
1713 elif os.path.exists(fname):
1714 logging.error("Exporting --dry-run created file")
1715 EXP_ERRORS += 1
1716
1717 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1718 logging.debug(cmd)
1719 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1720 if ret != 0:
1721 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1722 EXP_ERRORS += 1
1723 else:
1724 outdata = get_lines(fname)
1725 if len(outdata) > 0:
1726 logging.error("Exporting --dry-run to stdout not empty")
1727 logging.error("Data: " + outdata)
1728 EXP_ERRORS += 1
1729
1730 os.mkdir(TESTDIR)
1731 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1732 os.mkdir(os.path.join(TESTDIR, osd))
1733 print("Test pg export")
1734 for pg in ALLREPPGS + ALLECPGS:
1735 for osd in get_osds(pg, OSDDIR):
1736 mydir = os.path.join(TESTDIR, osd)
1737 fname = os.path.join(mydir, pg)
1738 if pg == ALLREPPGS[0]:
1739 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1740 elif pg == ALLREPPGS[1]:
1741 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1742 else:
1743 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1744 logging.debug(cmd)
1745 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1746 if ret != 0:
1747 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1748 EXP_ERRORS += 1
1749
1750 ERRORS += EXP_ERRORS
1751
1752 print("Test clear-data-digest")
1753 for nspace in db.keys():
1754 for basename in db[nspace].keys():
1755 JSON = db[nspace][basename]['json']
1756 cmd = (CFSD_PREFIX + "'{json}' clear-data-digest").format(osd='osd0', json=JSON)
1757 logging.debug(cmd)
1758 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1759 if ret != 0:
1760 logging.error("Clearing data digest failed for {json}".format(json=JSON))
1761 ERRORS += 1
1762 break
1763 cmd = (CFSD_PREFIX + "'{json}' dump | grep '\"data_digest\": \"0xff'").format(osd='osd0', json=JSON)
1764 logging.debug(cmd)
1765 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1766 if ret != 0:
1767 logging.error("Data digest not cleared for {json}".format(json=JSON))
1768 ERRORS += 1
1769 break
1770 break
1771 break
1772
1773 print("Test pg removal")
1774 RM_ERRORS = 0
1775 for pg in ALLREPPGS + ALLECPGS:
1776 for osd in get_osds(pg, OSDDIR):
1777 # This should do nothing
1778 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1779 logging.debug(cmd)
1780 ret = call(cmd, shell=True, stdout=nullfd)
1781 if ret != 0:
1782 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1783 RM_ERRORS += 1
1784 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1785 logging.debug(cmd)
1786 ret = call(cmd, shell=True, stdout=nullfd)
1787 if ret != 0:
1788 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1789 RM_ERRORS += 1
1790
1791 ERRORS += RM_ERRORS
1792
1793 IMP_ERRORS = 0
1794 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1795 print("Test pg import")
1796 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1797 dir = os.path.join(TESTDIR, osd)
1798 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1799 for pg in PGS:
1800 file = os.path.join(dir, pg)
1801 # Make sure this doesn't crash
1802 cmd = (CFSD_PREFIX + "--op dump-export --file {file}").format(osd=osd, file=file)
1803 logging.debug(cmd)
1804 ret = call(cmd, shell=True, stdout=nullfd)
1805 if ret != 0:
1806 logging.error("Dump-export failed from {file} with {ret}".format(file=file, ret=ret))
1807 IMP_ERRORS += 1
1808 # This should do nothing
1809 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1810 logging.debug(cmd)
1811 ret = call(cmd, shell=True, stdout=nullfd)
1812 if ret != 0:
1813 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1814 IMP_ERRORS += 1
1815 if pg == PGS[0]:
1816 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1817 elif pg == PGS[1]:
1818 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1819 else:
1820 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1821 logging.debug(cmd)
1822 ret = call(cmd, shell=True, stdout=nullfd)
1823 if ret != 0:
1824 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1825 IMP_ERRORS += 1
1826 else:
1827 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1828
1829 ERRORS += IMP_ERRORS
1830 logging.debug(cmd)
1831
1832 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1833 print("Verify replicated import data")
1834 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1835 ERRORS += data_errors
1836 else:
1837 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1838
1839 print("Test all --op dump-journal again")
1840 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1841 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1842
1843 vstart(new=False)
1844 wait_for_health()
1845
1846 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1847 print("Verify erasure coded import data")
1848 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1849 # Check replicated data/xattr/omap using rados
1850 print("Verify replicated import data using rados")
1851 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1852
1853 if EXP_ERRORS == 0:
1854 NEWPOOL = "rados-import-pool"
1855 cmd = "{path}/ceph osd pool create {pool} 8".format(pool=NEWPOOL, path=CEPH_BIN)
1856 logging.debug(cmd)
1857 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1858
1859 print("Test rados import")
1860 first = True
1861 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1862 dir = os.path.join(TESTDIR, osd)
1863 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1864 if pg.find("{id}.".format(id=REPID)) != 0:
1865 continue
1866 file = os.path.join(dir, pg)
1867 if first:
1868 first = False
1869 # This should do nothing
1870 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1871 logging.debug(cmd)
1872 ret = call(cmd, shell=True, stdout=nullfd)
1873 if ret != 0:
1874 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1875 ERRORS += 1
1876 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1877 logging.debug(cmd)
1878 data = check_output(cmd, shell=True)
1879 if data:
1880 logging.error("'{data}'".format(data=data))
1881 logging.error("Found objects after dry-run")
1882 ERRORS += 1
1883 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1884 logging.debug(cmd)
1885 ret = call(cmd, shell=True, stdout=nullfd)
1886 if ret != 0:
1887 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1888 ERRORS += 1
1889 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1890 logging.debug(cmd)
1891 ret = call(cmd, shell=True, stdout=nullfd)
1892 if ret != 0:
1893 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1894 ERRORS += 1
1895
1896 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1897 else:
1898 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1899
1900 # Clear directories of previous portion
1901 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1902 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1903 os.mkdir(TESTDIR)
1904 os.mkdir(DATADIR)
1905
1906 # Cause SPLIT_POOL to split and test import with object/log filtering
1907 print("Testing import all objects after a split")
1908 SPLIT_POOL = "split_pool"
1909 PG_COUNT = 1
1910 SPLIT_OBJ_COUNT = 5
1911 SPLIT_NSPACE_COUNT = 2
1912 SPLIT_NAME = "split"
1913 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1914 logging.debug(cmd)
1915 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1916 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1917 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1918 EXP_ERRORS = 0
1919 RM_ERRORS = 0
1920 IMP_ERRORS = 0
1921
1922 objects = range(1, SPLIT_OBJ_COUNT + 1)
1923 nspaces = range(SPLIT_NSPACE_COUNT)
1924 for n in nspaces:
1925 nspace = get_nspace(n)
1926
1927 for i in objects:
1928 NAME = SPLIT_NAME + "{num}".format(num=i)
1929 LNAME = nspace + "-" + NAME
1930 DDNAME = os.path.join(DATADIR, LNAME)
1931 DDNAME += "__head"
1932
1933 cmd = "rm -f " + DDNAME
1934 logging.debug(cmd)
1935 call(cmd, shell=True)
1936
1937 if i == 1:
1938 dataline = range(DATALINECOUNT)
1939 else:
1940 dataline = range(1)
1941 fd = open(DDNAME, "w")
1942 data = "This is the split data for " + LNAME + "\n"
1943 for _ in dataline:
1944 fd.write(data)
1945 fd.close()
1946
1947 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1948 logging.debug(cmd)
1949 ret = call(cmd, shell=True, stderr=nullfd)
1950 if ret != 0:
1951 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1952 return 1
1953
1954 wait_for_health()
1955 kill_daemons()
1956
1957 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1958 os.mkdir(os.path.join(TESTDIR, osd))
1959
1960 pg = "{pool}.0".format(pool=SPLITID)
1961 EXPORT_PG = pg
1962
1963 export_osds = get_osds(pg, OSDDIR)
1964 for osd in export_osds:
1965 mydir = os.path.join(TESTDIR, osd)
1966 fname = os.path.join(mydir, pg)
1967 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1968 logging.debug(cmd)
1969 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1970 if ret != 0:
1971 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1972 EXP_ERRORS += 1
1973
1974 ERRORS += EXP_ERRORS
1975
1976 if EXP_ERRORS == 0:
1977 vstart(new=False)
1978 wait_for_health()
1979
1980 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1981 logging.debug(cmd)
1982 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1983 time.sleep(5)
1984 wait_for_health()
1985
1986 kill_daemons()
1987
1988 # Now 2 PGs, poolid.0 and poolid.1
1989 # make note of pgs before we remove the pgs...
1990 osds = get_osds("{pool}.0".format(pool=SPLITID), OSDDIR);
1991 for seed in range(2):
1992 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1993
1994 for osd in osds:
1995 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
1996 logging.debug(cmd)
1997 ret = call(cmd, shell=True, stdout=nullfd)
1998
1999 which = 0
2000 for osd in osds:
2001 # This is weird. The export files are based on only the EXPORT_PG
2002 # and where that pg was before the split. Use 'which' to use all
2003 # export copies in import.
2004 mydir = os.path.join(TESTDIR, export_osds[which])
2005 fname = os.path.join(mydir, EXPORT_PG)
2006 which += 1
2007 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=EXPORT_PG, file=fname)
2008 logging.debug(cmd)
2009 ret = call(cmd, shell=True, stdout=nullfd)
2010 if ret != 0:
2011 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
2012 IMP_ERRORS += 1
2013
2014 ERRORS += IMP_ERRORS
2015
2016 # Start up again to make sure imports didn't corrupt anything
2017 if IMP_ERRORS == 0:
2018 print("Verify split import data")
2019 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
2020 ERRORS += data_errors
2021 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
2022 logging.error("Incorrect number of replicas seen {count}".format(count=count))
2023 ERRORS += 1
2024 vstart(new=False)
2025 wait_for_health()
2026
2027 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
2028 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
2029
2030 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
2031
2032 # vstart() starts 4 OSDs
2033 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
2034 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
2035
2036 kill_daemons()
2037 CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
2038 if CORES:
2039 CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
2040 os.mkdir(CORE_DIR)
2041 call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
2042 logging.error("Failure due to cores found")
2043 logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
2044 ERRORS += len(CORES)
2045
2046 if ERRORS == 0:
2047 print("TEST PASSED")
2048 return 0
2049 else:
2050 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
2051 return 1
2052
2053
2054 def remove_btrfs_subvolumes(path):
2055 if platform.system() == "FreeBSD":
2056 return
2057 result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE)
2058 for line in result.stdout:
2059 filesystem = decode(line).rstrip('\n')
2060 if filesystem == "btrfs":
2061 result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE)
2062 for line in result.stdout:
2063 subvolume = decode(line).split()[8]
2064 # extracting the relative volume name
2065 m = re.search(".*(%s.*)" % path, subvolume)
2066 if m:
2067 found = m.group(1)
2068 call("sudo btrfs subvolume delete %s" % found, shell=True)
2069
2070
2071 if __name__ == "__main__":
2072 status = 1
2073 try:
2074 status = main(sys.argv[1:])
2075 finally:
2076 kill_daemons()
2077 os.chdir(CEPH_BUILD_DIR)
2078 remove_btrfs_subvolumes(CEPH_DIR)
2079 call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
2080 sys.exit(status)