]> git.proxmox.com Git - ceph.git/blame - ceph/qa/standalone/special/ceph_objectstore_tool.py
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / qa / standalone / special / ceph_objectstore_tool.py
CommitLineData
7c673cae
FG
#!/usr/bin/env python

from __future__ import print_function
from subprocess import call
try:
    from subprocess import check_output
except ImportError:
    # Python < 2.7 has no subprocess.check_output; provide a minimal
    # backport so the rest of the script can use it unconditionally.
    def check_output(*popenargs, **kwargs):
        import subprocess
        # backported from python 2.7 stdlib
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            # Old CalledProcessError has no "output" constructor arg;
            # attach it after construction instead.
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
24import filecmp
25import os
26import subprocess
27import math
28import time
29import sys
30import re
31import logging
32import json
33import tempfile
34import platform
35
# subprocess.DEVNULL only exists on Python 3.3+; fall back to an
# explicitly opened /dev/null handle on older interpreters.
try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
43
if sys.version_info[0] >= 3:
    def decode(s):
        # Python 3: subprocess output is bytes; convert to str.
        return s.decode('utf-8')

    def check_output(*args, **kwargs):
        # Shadow check_output so callers always receive text, not bytes.
        return decode(subprocess.check_output(*args, **kwargs))
else:
    def decode(s):
        # Python 2: str is already bytes/text; pass through unchanged.
        return s
54
55
def wait_for_health():
    """Poll `ceph health` once a second until HEALTH_OK/HEALTH_WARN.

    Raises Exception if the cluster is still unhealthy after 150 tries.
    """
    print("Wait for health_ok...", end="")
    tries = 0
    # grep -v filters the acceptable states, so exit status 0 means some
    # *other* (unhealthy) state was reported.
    # FIX: raw string -- "\|" in a plain literal is an invalid escape
    # (DeprecationWarning on 3.6+); the runtime command is unchanged.
    while call(r"{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0:
        tries += 1
        if tries == 150:
            raise Exception("Time exceeded to go to health")
        time.sleep(1)
    print("DONE")
65
66
def get_pool_id(name, nullfd):
    """Return the pool id reported by `ceph osd pool stats <name>`."""
    # Output looks like "pool <name> id <N> ..."; field index 3 is the id.
    stats_cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN)
    return check_output(stats_cmd.split(), stderr=nullfd).split()[3]
71
72
# return a list of unique PGS given an osd subdirectory
def get_osd_pgs(SUBDIR, ID):
    """List PG ids found under SUBDIR/current whose name matches ID.

    A falsy ID yields an empty list (mirrors the original logic, which
    only scans when ID is truthy).
    """
    candidates = []
    if ID:
        endhead = re.compile("{id}.*_head$".format(id=ID))
        current = os.path.join(SUBDIR, "current")
        for entry in os.listdir(current):
            if not os.path.isdir(os.path.join(current, entry)):
                continue
            if ID is not None and not endhead.match(entry):
                continue
            candidates.append(entry)
    return [p.replace("_head", "") for p in candidates if "_head" in p]
82
83
# return a sorted list of unique PGs given a directory
def get_pgs(DIR, ID):
    """Union of PG ids across every osd* subdirectory of DIR, sorted."""
    pgs = set()
    for entry in os.listdir(DIR):
        subdir = os.path.join(DIR, entry)
        # Only descend into directories whose name starts with "osd".
        if entry.startswith("osd") and os.path.isdir(subdir):
            pgs.update(get_osd_pgs(subdir, ID))
    return sorted(pgs)
92
93
# return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
def get_objs(ALLPGS, prefix, DIR, ID):
    """Return the sorted subset of ALLPGS that hold files named prefix*.

    Scans every osd* subdirectory of DIR; ID is accepted for interface
    compatibility but is not used by the lookup.
    """
    found = set()
    osd_dirs = [e for e in os.listdir(DIR)
                if e.startswith("osd") and os.path.isdir(os.path.join(DIR, e))]
    for osd in osd_dirs:
        current = os.path.join(DIR, osd, "current")
        for pg in ALLPGS:
            pg_path = os.path.join(current, pg + "_head")
            if not os.path.isdir(pg_path):
                continue
            # Walk the PG dir; any file starting with prefix marks the PG.
            for _, _, files in os.walk(pg_path):
                if any(name.startswith(prefix) for name in files):
                    found.add(pg)
                    break
    return sorted(found)
110
111
# return a sorted list of OSDS which have data from a given PG
def get_osds(PG, DIR):
    """Sorted list of osd* subdirectories of DIR containing PG's head dir."""
    head = PG + "_head"
    return sorted(
        entry for entry in os.listdir(DIR)
        if entry.startswith("osd")
        and os.path.isdir(os.path.join(DIR, entry))
        and os.path.isdir(os.path.join(DIR, entry, "current", head))
    )
124
125
def get_lines(filename):
    """Read lines up to the first empty one, then delete the file.

    NOTE(review): reading stops at the first blank line because
    readline() returning '' also signals EOF -- callers only feed it
    command output without interior blank lines.
    """
    lines = []
    with open(filename, "r") as fp:
        while True:
            line = fp.readline().rstrip('\n')
            if not line:
                break
            lines.append(line)
    os.unlink(filename)
    return lines
137
138
def cat_file(level, filename):
    """Dump filename to stdout when `level` reaches the logger's
    effective level; below that threshold it is a no-op.

    Output stops at the first empty line (same sentinel as get_lines).
    """
    if level < logging.getLogger().getEffectiveLevel():
        return
    print("File: " + filename)
    with open(filename, "r") as fp:
        # iter() with the '' sentinel stops on the first blank/EOF line.
        for line in iter(lambda: fp.readline().rstrip('\n'), ''):
            print(line)
    print("<EOF>")
150
151
152def vstart(new, opt=""):
153 print("vstarting....", end="")
154 NEW = new and "-n" or "-N"
11fdf7f2 155 call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH={path}/src/pybind/mgr {path}/src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
7c673cae
FG
156 print("DONE")
157
158
def test_failure(cmd, errmsg, tty=False):
    """Run `cmd` expecting it to fail with `errmsg` on stderr.

    Returns 0 on the expected failure (or when tty=True and no
    controlling tty is available, which is logged as a SKIP), 1 when
    the command unexpectedly succeeds or the message is missing.
    """
    if tty:
        try:
            # FIX: "r+b" (read/write binary).  The historical mode
            # string "rwb" is rejected by Python 3's open() with
            # ValueError, which made every tty test silently SKIP.
            # TODO confirm tty-driven tests now actually run.
            ttyfd = open("/dev/tty", "r+b")
        except Exception as e:
            logging.info(str(e))
            logging.info("SKIP " + cmd)
            return 0
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    tmpfd = open(TMPFILE, "wb")

    logging.debug(cmd)
    if tty:
        ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd)
        ttyfd.close()
    else:
        ret = call(cmd, shell=True, stderr=tmpfd)
    tmpfd.close()
    if ret == 0:
        logging.error(cmd)
        logging.error("Should have failed, but got exit 0")
        return 1
    lines = get_lines(TMPFILE)
    matched = [l for l in lines if errmsg in l]
    if any(matched):
        logging.info("Correctly failed with message \"" + matched[0] + "\"")
        return 0
    else:
        logging.error("Command: " + cmd)
        logging.error("Bad messages to stderr \"" + str(lines) + "\"")
        logging.error("Expected \"" + errmsg + "\"")
        return 1
191
192
def get_nspace(num):
    """Namespace name for index `num`: '' for 0, otherwise 'ns<num>'."""
    if num == 0:
        return ""
    return "ns" + str(num)
197
198
def verify(DATADIR, POOL, NAME_PREFIX, db):
    """Compare pool contents against the local copies in DATADIR.

    For every head-object file in DATADIR whose object name starts with
    NAME_PREFIX, fetch it back with rados and diff the data, then check
    xattrs, the omap header, and omap key/values against the `db`
    bookkeeping dict.  Returns the number of mismatches found.
    """
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]:
        # File names look like "<nspace>-<object>__<clone>".
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1]
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # Fetch the object back and diff it against the local copy.
        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
        logging.debug(cmd)
        call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL)
        cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
        logging.debug(cmd)
        ret = call(cmd, shell=True)
        if ret != 0:
            logging.error("{file} data not imported properly".format(file=file))
            ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
        # Every recorded xattr must round-trip through rados getxattr.
        for key, val in db[nspace][file]["xattr"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
            logging.debug(cmd)
            getval = check_output(cmd, shell=True, stderr=DEVNULL)
            logging.debug("getxattr {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val))
                ERRORS += 1
                continue
        # Objects without a recorded omap header must come back empty.
        hdr = db[nspace][file].get("omapheader", "")
        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stderr=DEVNULL)
        if ret != 0:
            logging.error("rados getomapheader returned {ret}".format(ret=ret))
            ERRORS += 1
        else:
            # getomapheader writes at most one line into TMPFILE.
            getlines = get_lines(TMPFILE)
            assert(len(getlines) == 0 or len(getlines) == 1)
            if len(getlines) == 0:
                gethdr = ""
            else:
                gethdr = getlines[0]
            logging.debug("header: {hdr}".format(hdr=gethdr))
            if gethdr != hdr:
                logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                ERRORS += 1
        # Every recorded omap key/value pair must round-trip too.
        for key, val in db[nspace][file]["omap"].items():
            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
            logging.debug(cmd)
            ret = call(cmd, shell=True, stderr=DEVNULL)
            if ret != 0:
                logging.error("getomapval returned {ret}".format(ret=ret))
                ERRORS += 1
                continue
            getlines = get_lines(TMPFILE)
            if len(getlines) != 1:
                logging.error("Bad data from getomapval {lines}".format(lines=getlines))
                ERRORS += 1
                continue
            getval = getlines[0]
            logging.debug("getomapval {key} {val}".format(key=key, val=getval))
            if getval != val:
                logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val))
                ERRORS += 1
        try:
            os.unlink(TMPFILE)
        except:
            pass
    return ERRORS
278
279
def check_journal(jsondict):
    """Sanity-check a parsed dump-journal document.

    Validates the header (and its max_size) plus the entries list,
    delegating per-entry validation to check_journal_entries().
    Returns the number of problems detected.
    """
    errors = 0
    if 'header' not in jsondict:
        logging.error("Key 'header' not in dump-journal")
        errors += 1
    elif 'max_size' not in jsondict['header']:
        logging.error("Key 'max_size' not in dump-journal header")
        errors += 1
    else:
        print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size']))

    if 'entries' not in jsondict:
        logging.error("Key 'entries' not in dump-journal output")
        errors += 1
    elif not jsondict['entries']:
        # An empty journal is not an error, just noteworthy.
        logging.info("No entries in journal found")
    else:
        errors += check_journal_entries(jsondict['entries'])
    return errors
298
299
def check_journal_entries(entries):
    """Validate each journal entry dict; return the total error count."""
    errors = 0
    for enum, entry in enumerate(entries):
        if 'offset' not in entry:
            logging.error("No 'offset' key in entry {e}".format(e=enum))
            errors += 1
        if 'seq' not in entry:
            logging.error("No 'seq' key in entry {e}".format(e=enum))
            errors += 1
        if 'transactions' not in entry:
            logging.error("No 'transactions' key in entry {e}".format(e=enum))
            errors += 1
        elif not entry['transactions']:
            logging.error("No transactions found in entry {e}".format(e=enum))
            errors += 1
        else:
            errors += check_entry_transactions(entry, enum)
    return errors
318
319
def check_entry_transactions(entry, enum):
    """Validate the transactions of journal entry `enum`; return errors."""
    errors = 0
    for tnum, txn in enumerate(entry['transactions']):
        if 'trans_num' not in txn:
            logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        elif txn['trans_num'] != tnum:
            # trans_num values must be sequential starting at 0.
            ft = txn['trans_num']
            logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum))
            errors += 1
        if 'ops' not in txn:
            logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum))
            errors += 1
        else:
            errors += check_transaction_ops(txn['ops'], enum, tnum)
    return errors
336
337
def check_transaction_ops(ops, enum, tnum):
    """Validate the ops list of one transaction; return the error count.

    Each op must carry an 'op_name' and an 'op_num' equal to its index.
    An empty ops list only warns.
    """
    # FIX: was "len(ops) is 0" -- identity comparison with an int
    # literal only works because of CPython's small-int cache and raises
    # SyntaxWarning on Python 3.8+.  Use a plain truth test.
    if not ops:
        logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
    errors = 0
    for onum in range(len(ops)):
        if 'op_num' not in ops[onum]:
            logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
        elif ops[onum]['op_num'] != onum:
            fo = ops[onum]['op_num']
            logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum))
            errors += 1
        if 'op_name' not in ops[onum]:
            logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum))
            errors += 1
    return errors
354
355
def test_dump_journal(CFSD_PREFIX, osds):
    """Run `--op dump-journal --format json` on each osd and validate the
    resulting JSON with check_journal().  Returns the error count."""
    ERRORS = 0
    pid = os.getpid()
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)

    for osd in osds:
        # Test --op dump-journal by loading json
        cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd)
        logging.debug(cmd)
        tmpfd = open(TMPFILE, "wb")
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret != 0:
            logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
            ERRORS += 1
            tmpfd.close()  # FIX: descriptor leaked on this error path
            continue
        tmpfd.close()
        with open(TMPFILE, "r") as jsonfd:
            jsondict = json.load(jsonfd)
        os.unlink(TMPFILE)

        journal_errors = check_journal(jsondict)
        # FIX: was "is not 0" -- identity comparison with an int literal
        # (SyntaxWarning on Python 3.8+); use a value comparison.
        if journal_errors != 0:
            logging.error(jsondict)
        ERRORS += journal_errors

    return ERRORS
383
# Locate the build tree: honour CEPH_BUILD_DIR/CEPH_BIN/CEPH_ROOT from
# the environment, otherwise derive them from the current directory and
# export the results for the child processes spawned below.
CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR')
CEPH_BIN = os.environ.get('CEPH_BIN')
CEPH_ROOT = os.environ.get('CEPH_ROOT')

if not CEPH_BUILD_DIR:
    CEPH_BUILD_DIR=os.getcwd()
    os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR)
    CEPH_BIN=os.path.join(CEPH_BUILD_DIR, 'bin')
    os.putenv('CEPH_BIN', CEPH_BIN)
    CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR)
    os.putenv('CEPH_ROOT', CEPH_ROOT)
    CEPH_LIB=os.path.join(CEPH_BUILD_DIR, 'lib')
    os.putenv('CEPH_LIB', CEPH_LIB)

# Test cluster data lives under td/cot_dir relative to the build dir.
try:
    os.mkdir("td")
except:
    pass # ok if this is already there
CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir"))
CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
404
def kill_daemons():
    """Stop all daemons of the vstart cluster; output is discarded."""
    stop_cmd = "{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(
        conf=CEPH_CONF, path=CEPH_BIN)
    call(stop_cmd, shell=True)
408
def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
    """Diff every on-disk replica under OSDDIR against its source file.

    Returns (error_count, replica_count).
    NOTE(review): the internal-error path returns the bare int 1 rather
    than a tuple, which would break tuple-unpacking callers -- confirm
    against the call sites.
    """
    repcount = 0
    ERRORS = 0
    for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
        # File names look like "<nspace>-<object>__<clone>".
        nsfile = rawnsfile.split("__")[0]
        clone = rawnsfile.split("__")[1]
        nspace = nsfile.split("-")[0]
        file = nsfile.split("-")[1] + "__" + clone
        # Skip clones
        if clone != "head":
            continue
        path = os.path.join(DATADIR, rawnsfile)
        tmpfd = open(TMPFILE, "wb")
        # Locate every on-disk replica of this object across all OSDs.
        cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
        logging.debug(cmd)
        ret = call(cmd, shell=True, stdout=tmpfd)
        if ret:
            logging.critical("INTERNAL ERROR")
            return 1
        tmpfd.close()
        obj_locs = get_lines(TMPFILE)
        if len(obj_locs) == 0:
            logging.error("Can't find imported object {name}".format(name=file))
            ERRORS += 1
        for obj_loc in obj_locs:
            # For btrfs skip snap_* dirs
            if re.search("/snap_[0-9]*/", obj_loc) is not None:
                continue
            repcount += 1
            cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
            logging.debug(cmd)
            ret = call(cmd, shell=True)
            if ret != 0:
                logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
                ERRORS += 1
    return ERRORS, repcount
445
446
def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
    """Set the crush weight of every osd in `osd_ids` to `weight` in the
    newest osdmap stored on `osd_path`, while the cluster is offline.

    Extracts the osdmap with ceph-objectstore-tool, reweights the crush
    map via crushtool, then injects the map back at the same epoch
    (--force).  Returns True on success.
    """
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    output = check_output(cmd, shell=True)
    # FIX: raw string -- "\d" is an invalid escape in a plain literal
    # (DeprecationWarning on 3.6+); the regex itself is unchanged.
    epoch = int(re.findall(r'#(\d+)', output)[0])

    new_crush_file = tempfile.NamedTemporaryFile(delete=True)
    old_crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=old_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    for osd_id in osd_ids:
        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                              crush_file=old_crush_file.name,
                                                                                                              weight=weight,
                                                                                                              new_crush_file=new_crush_file.name, path=CEPH_BIN)
        ret = call(cmd, stdout=DEVNULL, shell=True)
        assert(ret == 0)
        # Ping-pong the temp files so the next iteration reads the crush
        # map that was just written.
        old_crush_file, new_crush_file = new_crush_file, old_crush_file

    # change them back, since we don't need to prepare for another round
    old_crush_file, new_crush_file = new_crush_file, old_crush_file
    old_crush_file.close()

    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=new_crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               stderr=DEVNULL,
               shell=True)
    assert(ret == 0)

    # Minimum test of --dry-run by using it, but not checking anything
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    assert(ret == 0)

    # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
    # to use a different epoch than the one in osdmap
    cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
    cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch)
    ret = call(cmd, stdout=DEVNULL, shell=True)

    return ret == 0
497
def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
    """Read back the crush item weights of `osd_ids` from the newest
    osdmap stored on `osd_path`.

    Returns a list of floats in crush-tree order, or None if the osdmap
    could not be extracted.
    """
    osdmap_file = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path,
                                                                       osdmap_file=osdmap_file.name)
    ret = call(cmd, stdout=DEVNULL, shell=True)
    if ret != 0:
        return None
    # we have to read the weights from the crush map, even we can query the weights using
    # osdmaptool, but please keep in mind, they are different:
    # item weights in crush map versus weight associated with each osd in osdmap
    crush_file = tempfile.NamedTemporaryFile(delete=True)
    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
                                                                                   crush_file=crush_file.name, path=CEPH_BIN),
               stdout=DEVNULL,
               shell=True)
    assert(ret == 0)
    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
                                                                                              num_osd=len(osd_ids), path=CEPH_BIN),
                          stderr=DEVNULL,
                          shell=True)
    weights = []
    for line in output.strip().split('\n'):
        print(line)
        # FIX: raw string -- "\s" is an invalid escape in a plain literal.
        linev = re.split(r'\s+', line)
        # FIX: was "linev[0] is ''" -- identity comparison against a str
        # literal is unreliable (SyntaxWarning on 3.8+); compare values.
        if linev[0] == '':
            linev.pop(0)
        print('linev %s' % linev)
        weights.append(float(linev[2]))

    return weights
528
529
def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths):
    """Exercise --op get-osdmap/set-osdmap by reweighting osds offline
    and reading the weights back; returns the error count."""
    print("Testing get-osdmap and set-osdmap")
    errors = 0
    kill_daemons()
    weight = 1 / math.e  # just some magic number in [0, 1]
    changed = []
    for store in osd_paths:
        if set_osd_weight(CFSD_PREFIX, osd_ids, store, weight):
            changed.append(store)
        else:
            logging.warning("Failed to change the weights: {0}".format(store))
    # i am pissed off if none of the store gets changed
    if not changed:
        errors += 1

    for store in changed:
        observed = get_osd_weights(CFSD_PREFIX, osd_ids, store)
        if not observed:
            errors += 1
        elif any(abs(w - weight) > 1e-5 for w in observed):
            logging.warning("Weight is not changed: {0} != {1}".format(observed, weight))
            errors += 1
    return errors
554
def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path):
    """Exercise --op get-inc-osdmap/set-inc-osdmap round-tripping.

    Overwrites incremental epoch e-1 with the copy from epoch e, reads
    it back to verify, then restores the original.  Returns the error
    count (1 on any tool failure).
    """
    # incrementals are not used unless we need to build an MOSDMap to update
    # OSD's peers, so an obvious way to test it is simply overwrite an epoch
    # with a different copy, and read it back to see if it matches.
    kill_daemons()
    file_e2 = tempfile.NamedTemporaryFile(delete=True)
    cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path,
                                                                    file=file_e2.name)
    output = check_output(cmd, shell=True)
    # FIX: raw string -- "\d" is an invalid escape in a plain literal
    # (DeprecationWarning on 3.6+); the regex itself is unchanged.
    epoch = int(re.findall(r'#(\d+)', output)[0])
    # backup e1 incremental before overwriting it
    epoch -= 1
    file_e1_backup = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # overwrite e1 with e2
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True)
    if ret: return 1
    # Use dry-run to set back to e1 which shouldn't happen
    cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
    if ret: return 1
    # read from e1
    file_e1_read = tempfile.NamedTemporaryFile(delete=True)
    cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}"
    ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True)
    if ret: return 1
    errors = 0
    try:
        if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False):
            logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name))
            errors += 1
    finally:
        # revert the change with file_e1_backup
        cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}"
        ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True)
        if ret:
            logging.error("Failed to revert the changed inc-osdmap")
            errors += 1

    return errors
598
599
def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS):
    """Exercise the objectstore tool's remove/removeall operations.

    For every tracked object replica: verify plain `remove` refuses
    objects that still have snapshots, that --force and --dry-run work,
    and that `removeall` really deletes everything; finally restart the
    cluster and drop the test snapshot.  Returns the error count.
    """
    # Test removeall
    TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid())
    nullfd = open(os.devnull, "w")
    errors=0
    print("Test removeall")
    kill_daemons()
    for nspace in db.keys():
        for basename in db[nspace].keys():
            JSON = db[nspace][basename]['json']
            for pg in OBJREPPGS:
                OSDS = get_osds(pg, OSDDIR)
                for osd in OSDS:
                    DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
                    # On-disk object file names encode the basename in
                    # field 0 and the namespace in field 4.
                    fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
                              and f.split("_")[0] == basename and f.split("_")[4] == nspace]
                    if not fnames:
                        continue

                    # Objects with snapshots must refuse a plain remove.
                    if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS):
                        cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON)
                        errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything")

                    cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("remove with --force failed for {json}".format(json=JSON))
                        errors += 1

                    cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # The real removeall (no --dry-run this time).
                    cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
                    if ret != 0:
                        logging.error("removeall failed for {json}".format(json=JSON))
                        errors += 1

                    # After removeall, listing the object must yield nothing.
                    tmpfd = open(TMPFILE, "w")
                    cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename)
                    logging.debug(cmd)
                    ret = call(cmd, shell=True, stdout=tmpfd)
                    if ret != 0:
                        logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
                        errors += 1
                    tmpfd.close()
                    lines = get_lines(TMPFILE)
                    if len(lines) != 0:
                        logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines))
                        errors += 1
    # Bring the cluster back up and drop the snapshot created earlier.
    vstart(new=False)
    wait_for_health()
    cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
    logging.debug(cmd)
    ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
    if ret != 0:
        logging.error("rados rmsnap failed")
        errors += 1
    time.sleep(2)
    wait_for_health()
    return errors
667
668
669def main(argv):
670 if sys.version_info[0] < 3:
671 sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
672 else:
673 stdout = sys.stdout.buffer
674 if len(argv) > 1 and argv[1] == "debug":
675 nullfd = stdout
676 else:
677 nullfd = DEVNULL
678
3efd9988
FG
679 call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True)
680 os.chdir(CEPH_DIR)
7c673cae 681 os.environ["CEPH_DIR"] = CEPH_DIR
3efd9988 682 OSDDIR = "dev"
7c673cae
FG
683 REP_POOL = "rep_pool"
684 REP_NAME = "REPobject"
685 EC_POOL = "ec_pool"
686 EC_NAME = "ECobject"
687 if len(argv) > 0 and argv[0] == 'large':
688 PG_COUNT = 12
a8e16298
TL
689 NUM_REP_OBJECTS = 200
690 NUM_CLONED_REP_OBJECTS = 50
7c673cae
FG
691 NUM_EC_OBJECTS = 12
692 NUM_NSPACES = 4
693 # Larger data sets for first object per namespace
694 DATALINECOUNT = 50000
695 # Number of objects to do xattr/omap testing on
696 ATTR_OBJS = 10
697 else:
698 PG_COUNT = 4
699 NUM_REP_OBJECTS = 2
700 NUM_CLONED_REP_OBJECTS = 2
701 NUM_EC_OBJECTS = 2
702 NUM_NSPACES = 2
703 # Larger data sets for first object per namespace
704 DATALINECOUNT = 10
705 # Number of objects to do xattr/omap testing on
706 ATTR_OBJS = 2
707 ERRORS = 0
708 pid = os.getpid()
709 TESTDIR = "/tmp/test.{pid}".format(pid=pid)
710 DATADIR = "/tmp/data.{pid}".format(pid=pid)
11fdf7f2 711 CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} "
7c673cae
FG
712 PROFNAME = "testecprofile"
713
714 os.environ['CEPH_CONF'] = CEPH_CONF
715 vstart(new=True)
716 wait_for_health()
717
718 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
719 logging.debug(cmd)
720 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
3efd9988 721 time.sleep(2)
7c673cae
FG
722 REPID = get_pool_id(REP_POOL, nullfd)
723
724 print("Created Replicated pool #{repid}".format(repid=REPID))
725
224ce89b 726 cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
7c673cae
FG
727 logging.debug(cmd)
728 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
729 cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
730 logging.debug(cmd)
731 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
732 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
733 logging.debug(cmd)
734 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
735 ECID = get_pool_id(EC_POOL, nullfd)
736
737 print("Created Erasure coded pool #{ecid}".format(ecid=ECID))
738
739 print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES)))
740 cmd = "mkdir -p {datadir}".format(datadir=DATADIR)
741 logging.debug(cmd)
742 call(cmd, shell=True)
743
744 db = {}
745
746 objects = range(1, NUM_REP_OBJECTS + 1)
747 nspaces = range(NUM_NSPACES)
748 for n in nspaces:
749 nspace = get_nspace(n)
750
751 db[nspace] = {}
752
753 for i in objects:
754 NAME = REP_NAME + "{num}".format(num=i)
755 LNAME = nspace + "-" + NAME
756 DDNAME = os.path.join(DATADIR, LNAME)
757 DDNAME += "__head"
758
759 cmd = "rm -f " + DDNAME
760 logging.debug(cmd)
761 call(cmd, shell=True)
762
763 if i == 1:
764 dataline = range(DATALINECOUNT)
765 else:
766 dataline = range(1)
767 fd = open(DDNAME, "w")
768 data = "This is the replicated data for " + LNAME + "\n"
769 for _ in dataline:
770 fd.write(data)
771 fd.close()
772
773 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
774 logging.debug(cmd)
775 ret = call(cmd, shell=True, stderr=nullfd)
776 if ret != 0:
777 logging.critical("Rados put command failed with {ret}".format(ret=ret))
778 return 1
779
780 db[nspace][NAME] = {}
781
782 if i < ATTR_OBJS + 1:
783 keys = range(i)
784 else:
785 keys = range(0)
786 db[nspace][NAME]["xattr"] = {}
787 for k in keys:
788 if k == 0:
789 continue
790 mykey = "key{i}-{k}".format(i=i, k=k)
791 myval = "val{i}-{k}".format(i=i, k=k)
792 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
793 logging.debug(cmd)
794 ret = call(cmd, shell=True)
795 if ret != 0:
796 logging.error("setxattr failed with {ret}".format(ret=ret))
797 ERRORS += 1
798 db[nspace][NAME]["xattr"][mykey] = myval
799
800 # Create omap header in all objects but REPobject1
801 if i < ATTR_OBJS + 1 and i != 1:
802 myhdr = "hdr{i}".format(i=i)
803 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
804 logging.debug(cmd)
805 ret = call(cmd, shell=True)
806 if ret != 0:
807 logging.critical("setomapheader failed with {ret}".format(ret=ret))
808 ERRORS += 1
809 db[nspace][NAME]["omapheader"] = myhdr
810
811 db[nspace][NAME]["omap"] = {}
812 for k in keys:
813 if k == 0:
814 continue
815 mykey = "okey{i}-{k}".format(i=i, k=k)
816 myval = "oval{i}-{k}".format(i=i, k=k)
817 cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
818 logging.debug(cmd)
819 ret = call(cmd, shell=True)
820 if ret != 0:
821 logging.critical("setomapval failed with {ret}".format(ret=ret))
822 db[nspace][NAME]["omap"][mykey] = myval
823
824 # Create some clones
825 cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
826 logging.debug(cmd)
827 call(cmd, shell=True)
828
829 objects = range(1, NUM_CLONED_REP_OBJECTS + 1)
830 nspaces = range(NUM_NSPACES)
831 for n in nspaces:
832 nspace = get_nspace(n)
833
834 for i in objects:
835 NAME = REP_NAME + "{num}".format(num=i)
836 LNAME = nspace + "-" + NAME
837 DDNAME = os.path.join(DATADIR, LNAME)
838 # First clone
839 CLONENAME = DDNAME + "__1"
840 DDNAME += "__head"
841
842 cmd = "mv -f " + DDNAME + " " + CLONENAME
843 logging.debug(cmd)
844 call(cmd, shell=True)
845
846 if i == 1:
847 dataline = range(DATALINECOUNT)
848 else:
849 dataline = range(1)
850 fd = open(DDNAME, "w")
851 data = "This is the replicated data after a snapshot for " + LNAME + "\n"
852 for _ in dataline:
853 fd.write(data)
854 fd.close()
855
856 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
857 logging.debug(cmd)
858 ret = call(cmd, shell=True, stderr=nullfd)
859 if ret != 0:
860 logging.critical("Rados put command failed with {ret}".format(ret=ret))
861 return 1
862
863 print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES)))
864
865 objects = range(1, NUM_EC_OBJECTS + 1)
866 nspaces = range(NUM_NSPACES)
867 for n in nspaces:
868 nspace = get_nspace(n)
869
870 for i in objects:
871 NAME = EC_NAME + "{num}".format(num=i)
872 LNAME = nspace + "-" + NAME
873 DDNAME = os.path.join(DATADIR, LNAME)
874 DDNAME += "__head"
875
876 cmd = "rm -f " + DDNAME
877 logging.debug(cmd)
878 call(cmd, shell=True)
879
880 if i == 1:
881 dataline = range(DATALINECOUNT)
882 else:
883 dataline = range(1)
884 fd = open(DDNAME, "w")
885 data = "This is the erasure coded data for " + LNAME + "\n"
886 for j in dataline:
887 fd.write(data)
888 fd.close()
889
890 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
891 logging.debug(cmd)
892 ret = call(cmd, shell=True, stderr=nullfd)
893 if ret != 0:
894 logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret))
895 return 1
896
897 db[nspace][NAME] = {}
898
899 db[nspace][NAME]["xattr"] = {}
900 if i < ATTR_OBJS + 1:
901 keys = range(i)
902 else:
903 keys = range(0)
904 for k in keys:
905 if k == 0:
906 continue
907 mykey = "key{i}-{k}".format(i=i, k=k)
908 myval = "val{i}-{k}".format(i=i, k=k)
909 cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
910 logging.debug(cmd)
911 ret = call(cmd, shell=True)
912 if ret != 0:
913 logging.error("setxattr failed with {ret}".format(ret=ret))
914 ERRORS += 1
915 db[nspace][NAME]["xattr"][mykey] = myval
916
917 # Omap isn't supported in EC pools
918 db[nspace][NAME]["omap"] = {}
919
920 logging.debug(db)
921
922 kill_daemons()
923
924 if ERRORS:
925 logging.critical("Unable to set up test")
926 return 1
927
928 ALLREPPGS = get_pgs(OSDDIR, REPID)
929 logging.debug(ALLREPPGS)
930 ALLECPGS = get_pgs(OSDDIR, ECID)
931 logging.debug(ALLECPGS)
932
933 OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID)
934 logging.debug(OBJREPPGS)
935 OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID)
936 logging.debug(OBJECPGS)
937
938 ONEPG = ALLREPPGS[0]
939 logging.debug(ONEPG)
940 osds = get_osds(ONEPG, OSDDIR)
941 ONEOSD = osds[0]
942 logging.debug(ONEOSD)
943
944 print("Test invalid parameters")
945 # On export can't use stdout to a terminal
946 cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
947 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
948
949 # On export can't use stdout to a terminal
950 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
951 ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True)
952
953 # Prep a valid ec export file for import failure tests
954 ONEECPG = ALLECPGS[0]
955 osds = get_osds(ONEECPG, OSDDIR)
956 ONEECOSD = osds[0]
957 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
958 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE)
959 logging.debug(cmd)
960 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
961
962 os.unlink(OTHERFILE)
963
964 # Prep a valid export file for import failure tests
965 OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
966 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
967 logging.debug(cmd)
968 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
969
11fdf7f2 970 # On import can't specify a different pgid than the file
7c673cae 971 TMPPG="{pool}.80".format(pool=REPID)
972 cmd = (CFSD_PREFIX + "--op import --pgid 12.dd --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
973 ERRORS += test_failure(cmd, "specified pgid 12.dd does not match actual pgid")
974
975 os.unlink(OTHERFILE)
976 cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
977 ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))
978
11fdf7f2 979 cmd = "{path}/ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
980 ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")
981
982 cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD)
983 ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory")
984
985 cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD)
986 ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory")
987
988 # On import can't use stdin from a terminal
989 cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG)
990 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
991
992 # On import can't use stdin from a terminal
993 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
994 ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True)
995
996 # Specify a bad --type
997 os.mkdir(OSDDIR + "/fakeosd")
11fdf7f2 998 cmd = ("{path}/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
999 ERRORS += test_failure(cmd, "Unable to create store of type foobar")
1000
1001 # Don't specify a data-path
11fdf7f2 1002 cmd = "{path}/ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
1003 ERRORS += test_failure(cmd, "Must provide --data-path")
1004
1005 cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD)
1006 ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option")
1007
1008 cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD)
1009 ERRORS += test_failure(cmd, "Must provide pgid")
1010
1011 # Don't secify a --op nor object command
1012 cmd = CFSD_PREFIX.format(osd=ONEOSD)
1013 ERRORS += test_failure(cmd, "Must provide --op or object command...")
1014
1015 # Specify a bad --op command
1016 cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD)
11fdf7f2 1017 ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log)")
1018
1019 # Provide just the object param not a command
1020 cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD)
1021 ERRORS += test_failure(cmd, "Invalid syntax, missing command")
1022
1023 # Provide an object name that doesn't exist
1024 cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD)
1025 ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found")
1026
1027 # Provide an invalid object command
1028 cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG)
1029 ERRORS += test_failure(cmd, "Unknown object command 'notacommand'")
1030
1031 cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG)
1032 ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified")
1033
1034 cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG)
1035 ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array")
1036
1037 cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1038 ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements")
1039
1040 cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1041 ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements")
1042
1043 cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1044 ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements")
1045
1046 cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1047 ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string")
1048
1049 cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG)
1050 ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4")
1051
1052 TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid)
1053 ALLPGS = OBJREPPGS + OBJECPGS
1054 OSDS = get_osds(ALLPGS[0], OSDDIR)
1055 osd = OSDS[0]
1056
1057 print("Test all --op dump-journal")
1058 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1059 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1060
1061 # Test --op list and generate json for all objects
1062 print("Test --op list variants")
1063
1064 # retrieve all objects from all PGs
1065 tmpfd = open(TMPFILE, "wb")
1066 cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd)
1067 logging.debug(cmd)
1068 ret = call(cmd, shell=True, stdout=tmpfd)
1069 if ret != 0:
1070 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1071 ERRORS += 1
1072 tmpfd.close()
1073 lines = get_lines(TMPFILE)
1074 JSONOBJ = sorted(set(lines))
1075 (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0]
1076
1077 # retrieve all objects in a given PG
1078 tmpfd = open(OTHERFILE, "ab")
1079 cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid)
1080 logging.debug(cmd)
1081 ret = call(cmd, shell=True, stdout=tmpfd)
1082 if ret != 0:
1083 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1084 ERRORS += 1
1085 tmpfd.close()
1086 lines = get_lines(OTHERFILE)
1087 JSONOBJ = sorted(set(lines))
1088 (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0]
1089
1090 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1091 logging.error("the first line of --op list is different "
1092 "from the first line of --op list --pgid {pg}".format(pg=pgid))
1093 ERRORS += 1
1094
1095 # retrieve all objects with a given name in a given PG
1096 tmpfd = open(OTHERFILE, "wb")
1097 cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid'])
1098 logging.debug(cmd)
1099 ret = call(cmd, shell=True, stdout=tmpfd)
1100 if ret != 0:
1101 logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd))
1102 ERRORS += 1
1103 tmpfd.close()
1104 lines = get_lines(OTHERFILE)
1105 JSONOBJ = sorted(set(lines))
1106 (other_pgid, other_coll, other_jsondict) in json.loads(JSONOBJ[0])[0]
1107
1108 if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll:
1109 logging.error("the first line of --op list is different "
1110 "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid']))
1111 ERRORS += 1
1112
1113 print("Test --op list by generating json for all objects using default format")
1114 for pg in ALLPGS:
1115 OSDS = get_osds(pg, OSDDIR)
1116 for osd in OSDS:
1117 tmpfd = open(TMPFILE, "ab")
1118 cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg)
1119 logging.debug(cmd)
1120 ret = call(cmd, shell=True, stdout=tmpfd)
1121 if ret != 0:
1122 logging.error("Bad exit status {ret} from --op list request".format(ret=ret))
1123 ERRORS += 1
1124
1125 tmpfd.close()
1126 lines = get_lines(TMPFILE)
1127 JSONOBJ = sorted(set(lines))
1128 for JSON in JSONOBJ:
1129 (pgid, jsondict) = json.loads(JSON)
1130 # Skip clones for now
1131 if jsondict['snapid'] != -2:
1132 continue
1133 db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict))
1134 # print db[jsondict['namespace']][jsondict['oid']]['json']
1135 if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict:
1136 logging.error("Malformed JSON {json}".format(json=JSON))
1137 ERRORS += 1
1138
1139 # Test get-bytes
1140 print("Test get-bytes and set-bytes")
1141 for nspace in db.keys():
1142 for basename in db[nspace].keys():
1143 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1144 JSON = db[nspace][basename]['json']
1145 GETNAME = "/tmp/getbytes.{pid}".format(pid=pid)
1146 TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid)
1147 SETNAME = "/tmp/setbytes.{pid}".format(pid=pid)
1148 BADNAME = "/tmp/badbytes.{pid}".format(pid=pid)
1149 for pg in OBJREPPGS:
1150 OSDS = get_osds(pg, OSDDIR)
1151 for osd in OSDS:
1152 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1153 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1154 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1155 if not fnames:
1156 continue
1157 try:
1158 os.unlink(GETNAME)
1159 except:
1160 pass
1161 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME)
1162 logging.debug(cmd)
1163 ret = call(cmd, shell=True)
1164 if ret != 0:
1165 logging.error("Bad exit status {ret}".format(ret=ret))
1166 ERRORS += 1
1167 continue
1168 cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
1169 ret = call(cmd, shell=True)
1170 if ret != 0:
1171 logging.error("Data from get-bytes differ")
1172 logging.debug("Got:")
1173 cat_file(logging.DEBUG, GETNAME)
1174 logging.debug("Expected:")
1175 cat_file(logging.DEBUG, file)
1176 ERRORS += 1
1177 fd = open(SETNAME, "w")
1178 data = "put-bytes going into {file}\n".format(file=file)
1179 fd.write(data)
1180 fd.close()
1181 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME)
1182 logging.debug(cmd)
1183 ret = call(cmd, shell=True)
1184 if ret != 0:
1185 logging.error("Bad exit status {ret} from set-bytes".format(ret=ret))
1186 ERRORS += 1
1187 fd = open(TESTNAME, "wb")
1188 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1189 logging.debug(cmd)
1190 ret = call(cmd, shell=True, stdout=fd)
1191 fd.close()
1192 if ret != 0:
1193 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1194 ERRORS += 1
1195 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1196 logging.debug(cmd)
1197 ret = call(cmd, shell=True)
1198 if ret != 0:
1199 logging.error("Data after set-bytes differ")
1200 logging.debug("Got:")
1201 cat_file(logging.DEBUG, TESTNAME)
1202 logging.debug("Expected:")
1203 cat_file(logging.DEBUG, SETNAME)
1204 ERRORS += 1
1205
1206 # Use set-bytes with --dry-run and make sure contents haven't changed
1207 fd = open(BADNAME, "w")
1208 data = "Bad data for --dry-run in {file}\n".format(file=file)
1209 fd.write(data)
1210 fd.close()
1211 cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME)
1212 logging.debug(cmd)
1213 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1214 if ret != 0:
1215 logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret))
1216 ERRORS += 1
1217 fd = open(TESTNAME, "wb")
1218 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON)
1219 logging.debug(cmd)
1220 ret = call(cmd, shell=True, stdout=fd)
1221 fd.close()
1222 if ret != 0:
1223 logging.error("Bad exit status {ret} from get-bytes".format(ret=ret))
1224 ERRORS += 1
1225 cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME)
1226 logging.debug(cmd)
1227 ret = call(cmd, shell=True)
1228 if ret != 0:
1229 logging.error("Data after set-bytes --dry-run changed!")
1230 logging.debug("Got:")
1231 cat_file(logging.DEBUG, TESTNAME)
1232 logging.debug("Expected:")
1233 cat_file(logging.DEBUG, SETNAME)
1234 ERRORS += 1
1235
1236 fd = open(file, "rb")
1237 cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON)
1238 logging.debug(cmd)
1239 ret = call(cmd, shell=True, stdin=fd)
1240 if ret != 0:
1241 logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret))
1242 ERRORS += 1
1243 fd.close()
1244
1245 try:
1246 os.unlink(GETNAME)
1247 except:
1248 pass
1249 try:
1250 os.unlink(TESTNAME)
1251 except:
1252 pass
1253 try:
1254 os.unlink(SETNAME)
1255 except:
1256 pass
1257 try:
1258 os.unlink(BADNAME)
1259 except:
1260 pass
1261
1262 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1263 print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap")
1264 for nspace in db.keys():
1265 for basename in db[nspace].keys():
1266 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1267 JSON = db[nspace][basename]['json']
1268 for pg in OBJREPPGS:
1269 OSDS = get_osds(pg, OSDDIR)
1270 for osd in OSDS:
1271 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1272 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1273 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1274 if not fnames:
1275 continue
1276 for key, val in db[nspace][basename]["xattr"].items():
1277 attrkey = "_" + key
1278 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey)
1279 logging.debug(cmd)
1280 getval = check_output(cmd, shell=True)
1281 if getval != val:
1282 logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val))
1283 ERRORS += 1
1284 continue
1285 # set-attr to bogus value "foobar"
1286 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1287 logging.debug(cmd)
1288 ret = call(cmd, shell=True)
1289 if ret != 0:
1290 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1291 ERRORS += 1
1292 continue
1293 # Test set-attr with dry-run
1294 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1295 logging.debug(cmd)
1296 ret = call(cmd, shell=True, stdout=nullfd)
1297 if ret != 0:
1298 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1299 ERRORS += 1
1300 continue
1301 # Check the set-attr
1302 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1303 logging.debug(cmd)
1304 getval = check_output(cmd, shell=True)
1305 if ret != 0:
1306 logging.error("Bad exit status {ret} from get-attr".format(ret=ret))
1307 ERRORS += 1
1308 continue
1309 if getval != "foobar":
1310 logging.error("Check of set-attr failed because we got {val}".format(val=getval))
1311 ERRORS += 1
1312 continue
1313 # Test rm-attr
1314 cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1315 logging.debug(cmd)
1316 ret = call(cmd, shell=True)
1317 if ret != 0:
1318 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1319 ERRORS += 1
1320 continue
1321 # Check rm-attr with dry-run
1322 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1323 logging.debug(cmd)
1324 ret = call(cmd, shell=True, stdout=nullfd)
1325 if ret != 0:
1326 logging.error("Bad exit status {ret} from rm-attr".format(ret=ret))
1327 ERRORS += 1
1328 continue
1329 cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey)
1330 logging.debug(cmd)
1331 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1332 if ret == 0:
1333 logging.error("For rm-attr expect get-attr to fail, but it succeeded")
1334 ERRORS += 1
1335 # Put back value
1336 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val)
1337 logging.debug(cmd)
1338 ret = call(cmd, shell=True)
1339 if ret != 0:
1340 logging.error("Bad exit status {ret} from set-attr".format(ret=ret))
1341 ERRORS += 1
1342 continue
1343
1344 hdr = db[nspace][basename].get("omapheader", "")
1345 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON)
1346 logging.debug(cmd)
1347 gethdr = check_output(cmd, shell=True)
1348 if gethdr != hdr:
1349 logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr))
1350 ERRORS += 1
1351 continue
1352 # set-omaphdr to bogus value "foobar"
1353 cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1354 logging.debug(cmd)
1355 ret = call(cmd, shell=True)
1356 if ret != 0:
1357 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1358 ERRORS += 1
1359 continue
1360 # Check the set-omaphdr
1361 cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON)
1362 logging.debug(cmd)
1363 gethdr = check_output(cmd, shell=True)
1364 if ret != 0:
1365 logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret))
1366 ERRORS += 1
1367 continue
1368 if gethdr != "foobar":
1369 logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval))
1370 ERRORS += 1
1371 continue
1372 # Test dry-run with set-omaphdr
1373 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON)
1374 logging.debug(cmd)
1375 ret = call(cmd, shell=True, stdout=nullfd)
1376 if ret != 0:
1377 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1378 ERRORS += 1
1379 continue
1380 # Put back value
1381 cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr)
1382 logging.debug(cmd)
1383 ret = call(cmd, shell=True)
1384 if ret != 0:
1385 logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret))
1386 ERRORS += 1
1387 continue
1388
1389 for omapkey, val in db[nspace][basename]["omap"].items():
1390 cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey)
1391 logging.debug(cmd)
1392 getval = check_output(cmd, shell=True)
1393 if getval != val:
1394 logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val))
1395 ERRORS += 1
1396 continue
1397 # set-omap to bogus value "foobar"
1398 cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1399 logging.debug(cmd)
1400 ret = call(cmd, shell=True)
1401 if ret != 0:
1402 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1403 ERRORS += 1
1404 continue
1405 # Check set-omap with dry-run
1406 cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1407 logging.debug(cmd)
1408 ret = call(cmd, shell=True, stdout=nullfd)
1409 if ret != 0:
1410 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1411 ERRORS += 1
1412 continue
1413 # Check the set-omap
1414 cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1415 logging.debug(cmd)
1416 getval = check_output(cmd, shell=True)
1417 if ret != 0:
1418 logging.error("Bad exit status {ret} from get-omap".format(ret=ret))
1419 ERRORS += 1
1420 continue
1421 if getval != "foobar":
1422 logging.error("Check of set-omap failed because we got {val}".format(val=getval))
1423 ERRORS += 1
1424 continue
1425 # Test rm-omap
1426 cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1427 logging.debug(cmd)
1428 ret = call(cmd, shell=True)
1429 if ret != 0:
1430 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1431 ERRORS += 1
1432 # Check rm-omap with dry-run
1433 cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1434 logging.debug(cmd)
1435 ret = call(cmd, shell=True, stdout=nullfd)
1436 if ret != 0:
1437 logging.error("Bad exit status {ret} from rm-omap".format(ret=ret))
1438 ERRORS += 1
1439 cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey)
1440 logging.debug(cmd)
1441 ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd)
1442 if ret == 0:
1443 logging.error("For rm-omap expect get-omap to fail, but it succeeded")
1444 ERRORS += 1
1445 # Put back value
1446 cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val)
1447 logging.debug(cmd)
1448 ret = call(cmd, shell=True)
1449 if ret != 0:
1450 logging.error("Bad exit status {ret} from set-omap".format(ret=ret))
1451 ERRORS += 1
1452 continue
1453
1454 # Test dump
1455 print("Test dump")
1456 for nspace in db.keys():
1457 for basename in db[nspace].keys():
1458 file = os.path.join(DATADIR, nspace + "-" + basename + "__head")
1459 JSON = db[nspace][basename]['json']
a8e16298 1460 jsondict = json.loads(JSON)
1461 for pg in OBJREPPGS:
1462 OSDS = get_osds(pg, OSDDIR)
1463 for osd in OSDS:
1464 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1465 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1466 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1467 if not fnames:
1468 continue
1469 if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS):
1470 continue
a8e16298 1471 logging.debug("REPobject " + JSON)
1472 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON)
1473 logging.debug(cmd)
1474 ret = call(cmd, shell=True)
1475 if ret != 0:
1476 logging.error("Invalid dump for {json}".format(json=JSON))
1477 ERRORS += 1
1478 if 'shard_id' in jsondict[1]:
1479 logging.debug("ECobject " + JSON)
1480 for pg in OBJECPGS:
1481 OSDS = get_osds(pg, OSDDIR)
1482 jsondict = json.loads(JSON)
1483 for osd in OSDS:
1484 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1485 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1486 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1487 if not fnames:
1488 continue
1489 if int(basename.split(EC_NAME)[1]) > int(NUM_EC_OBJECTS):
1490 continue
1491 # Fix shard_id since we only have one json instance for each object
1492 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1493 cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"hinfo\": [{{]' > /dev/null").format(osd=osd, json=json.dumps((pg, jsondict[1])))
1494 logging.debug(cmd)
1495 ret = call(cmd, shell=True)
1496 if ret != 0:
1497 logging.error("Invalid dump for {json}".format(json=JSON))
1498
1499 print("Test list-attrs get-attr")
1500 ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid)
1501 VALFILE = r"/tmp/val.{pid}".format(pid=pid)
1502 for nspace in db.keys():
1503 for basename in db[nspace].keys():
1504 file = os.path.join(DATADIR, nspace + "-" + basename)
1505 JSON = db[nspace][basename]['json']
1506 jsondict = json.loads(JSON)
1507
a8e16298 1508 if 'shard_id' in jsondict[1]:
1509 logging.debug("ECobject " + JSON)
1510 found = 0
1511 for pg in OBJECPGS:
1512 OSDS = get_osds(pg, OSDDIR)
1513 # Fix shard_id since we only have one json instance for each object
1514 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1515 JSON = json.dumps((pg, jsondict[1]))
7c673cae 1516 for osd in OSDS:
a8e16298 1517 cmd = (CFSD_PREFIX + " '{json}' get-attr hinfo_key").format(osd=osd, json=JSON)
1518 logging.debug("TRY: " + cmd)
1519 try:
1520 out = check_output(cmd, shell=True, stderr=subprocess.STDOUT)
1521 logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out))
1522 found += 1
1523 except subprocess.CalledProcessError as e:
1524 if "No such file or directory" not in e.output and "No data available" not in e.output:
1525 raise
1526 # Assuming k=2 m=1 for the default ec pool
1527 if found != 3:
1528 logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found))
1529 ERRORS += 1
1530
1531 for pg in ALLPGS:
1532 # Make sure rep obj with rep pg or ec obj with ec pg
a8e16298 1533 if ('shard_id' in jsondict[1]) != (pg.find('s') > 0):
7c673cae 1534 continue
a8e16298 1535 if 'shard_id' in jsondict[1]:
7c673cae 1536 # Fix shard_id since we only have one json instance for each object
1537 jsondict[1]['shard_id'] = int(pg.split('s')[1])
1538 JSON = json.dumps((pg, jsondict[1]))
1539 OSDS = get_osds(pg, OSDDIR)
1540 for osd in OSDS:
1541 DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg))))
1542 fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f))
1543 and f.split("_")[0] == basename and f.split("_")[4] == nspace]
1544 if not fnames:
1545 continue
1546 afd = open(ATTRFILE, "wb")
a8e16298 1547 cmd = (CFSD_PREFIX + " '{json}' list-attrs").format(osd=osd, json=JSON)
1548 logging.debug(cmd)
1549 ret = call(cmd, shell=True, stdout=afd)
1550 afd.close()
1551 if ret != 0:
1552 logging.error("list-attrs failed with {ret}".format(ret=ret))
1553 ERRORS += 1
1554 continue
1555 keys = get_lines(ATTRFILE)
1556 values = dict(db[nspace][basename]["xattr"])
1557 for key in keys:
1558 if key == "_" or key == "snapset" or key == "hinfo_key":
1559 continue
1560 key = key.strip("_")
1561 if key not in values:
1562 logging.error("Unexpected key {key} present".format(key=key))
1563 ERRORS += 1
1564 continue
1565 exp = values.pop(key)
1566 vfd = open(VALFILE, "wb")
a8e16298 1567 cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key="_" + key)
1568 logging.debug(cmd)
1569 ret = call(cmd, shell=True, stdout=vfd)
1570 vfd.close()
1571 if ret != 0:
1572 logging.error("get-attr failed with {ret}".format(ret=ret))
1573 ERRORS += 1
1574 continue
1575 lines = get_lines(VALFILE)
1576 val = lines[0]
1577 if exp != val:
1578 logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
1579 ERRORS += 1
1580 if len(values) != 0:
1581 logging.error("Not all keys found, remaining keys:")
1582 print(values)
1583
1584 print("Test --op meta-list")
1585 tmpfd = open(TMPFILE, "wb")
1586 cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD)
1587 logging.debug(cmd)
1588 ret = call(cmd, shell=True, stdout=tmpfd)
1589 if ret != 0:
1590 logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret))
1591 ERRORS += 1
1592
1593 print("Test get-bytes on meta")
1594 tmpfd.close()
1595 lines = get_lines(TMPFILE)
1596 JSONOBJ = sorted(set(lines))
1597 for JSON in JSONOBJ:
1598 (pgid, jsondict) = json.loads(JSON)
1599 if pgid != "meta":
1600 logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid))
1601 ERRORS += 1
1602 if jsondict['namespace'] != "":
1603 logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace']))
1604 ERRORS += 1
1605 logging.info(JSON)
1606 try:
1607 os.unlink(GETNAME)
1608 except:
1609 pass
1610 cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME)
1611 logging.debug(cmd)
1612 ret = call(cmd, shell=True)
1613 if ret != 0:
1614 logging.error("Bad exit status {ret}".format(ret=ret))
1615 ERRORS += 1
1616
1617 try:
1618 os.unlink(GETNAME)
1619 except:
1620 pass
1621 try:
1622 os.unlink(TESTNAME)
1623 except:
1624 pass
1625
1626 print("Test pg info")
1627 for pg in ALLREPPGS + ALLECPGS:
1628 for osd in get_osds(pg, OSDDIR):
1629 cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg)
1630 logging.debug(cmd)
1631 ret = call(cmd, shell=True, stdout=nullfd)
1632 if ret != 0:
1633 logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1634 ERRORS += 1
1635
1636 print("Test pg logging")
1637 if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS):
1638 logging.warning("All PGs have objects, so no log without modify entries")
1639 for pg in ALLREPPGS + ALLECPGS:
1640 for osd in get_osds(pg, OSDDIR):
1641 tmpfd = open(TMPFILE, "wb")
1642 cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg)
1643 logging.debug(cmd)
1644 ret = call(cmd, shell=True, stdout=tmpfd)
1645 if ret != 0:
1646 logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1647 ERRORS += 1
1648 HASOBJ = pg in OBJREPPGS + OBJECPGS
1649 MODOBJ = False
1650 for line in get_lines(TMPFILE):
1651 if line.find("modify") != -1:
1652 MODOBJ = True
1653 break
1654 if HASOBJ != MODOBJ:
1655 logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd))
1656 MSG = (HASOBJ and [""] or ["NOT "])[0]
1657 print("Log should {msg}have a modify entry".format(msg=MSG))
1658 ERRORS += 1
1659
1660 try:
1661 os.unlink(TMPFILE)
1662 except:
1663 pass
1664
1665 print("Test list-pgs")
1666 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1667
1668 CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None)
1669 CHECK_PGS = sorted(CHECK_PGS)
1670
1671 cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd)
1672 logging.debug(cmd)
1673 TEST_PGS = check_output(cmd, shell=True).split("\n")
1674 TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line
1675
1676 if TEST_PGS != CHECK_PGS:
1677 logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd))
1678 logging.error("Expected {pgs}".format(pgs=CHECK_PGS))
1679 logging.error("Got {pgs}".format(pgs=TEST_PGS))
1680 ERRORS += 1
1681
1682 EXP_ERRORS = 0
1683 print("Test pg export --dry-run")
1684 pg = ALLREPPGS[0]
1685 osd = get_osds(pg, OSDDIR)[0]
1686 fname = "/tmp/fname.{pid}".format(pid=pid)
1687 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1688 logging.debug(cmd)
1689 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1690 if ret != 0:
1691 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1692 EXP_ERRORS += 1
1693 elif os.path.exists(fname):
1694 logging.error("Exporting --dry-run created file")
1695 EXP_ERRORS += 1
1696
1697 cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1698 logging.debug(cmd)
1699 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1700 if ret != 0:
1701 logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1702 EXP_ERRORS += 1
1703 else:
1704 outdata = get_lines(fname)
1705 if len(outdata) > 0:
1706 logging.error("Exporting --dry-run to stdout not empty")
1707 logging.error("Data: " + outdata)
1708 EXP_ERRORS += 1
1709
1710 os.mkdir(TESTDIR)
1711 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1712 os.mkdir(os.path.join(TESTDIR, osd))
1713 print("Test pg export")
1714 for pg in ALLREPPGS + ALLECPGS:
1715 for osd in get_osds(pg, OSDDIR):
1716 mydir = os.path.join(TESTDIR, osd)
1717 fname = os.path.join(mydir, pg)
1718 if pg == ALLREPPGS[0]:
1719 cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname)
1720 elif pg == ALLREPPGS[1]:
1721 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname)
1722 else:
1723 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1724 logging.debug(cmd)
1725 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1726 if ret != 0:
1727 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1728 EXP_ERRORS += 1
1729
1730 ERRORS += EXP_ERRORS
1731
11fdf7f2
TL
1732 print("Test clear-data-digest")
1733 for nspace in db.keys():
1734 for basename in db[nspace].keys():
1735 JSON = db[nspace][basename]['json']
1736 cmd = (CFSD_PREFIX + "'{json}' clear-data-digest").format(osd='osd0', json=JSON)
1737 logging.debug(cmd)
1738 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1739 if ret != 0:
1740 logging.error("Clearing data digest failed for {json}".format(json=JSON))
1741 ERRORS += 1
1742 break
1743 cmd = (CFSD_PREFIX + "'{json}' dump | grep '\"data_digest\": \"0xff'").format(osd='osd0', json=JSON)
1744 logging.debug(cmd)
1745 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1746 if ret != 0:
1747 logging.error("Data digest not cleared for {json}".format(json=JSON))
1748 ERRORS += 1
1749 break
1750 break
1751 break
1752
7c673cae
FG
1753 print("Test pg removal")
1754 RM_ERRORS = 0
1755 for pg in ALLREPPGS + ALLECPGS:
1756 for osd in get_osds(pg, OSDDIR):
1757 # This should do nothing
1758 cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd)
1759 logging.debug(cmd)
1760 ret = call(cmd, shell=True, stdout=nullfd)
1761 if ret != 0:
1762 logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1763 RM_ERRORS += 1
3efd9988 1764 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
7c673cae
FG
1765 logging.debug(cmd)
1766 ret = call(cmd, shell=True, stdout=nullfd)
1767 if ret != 0:
1768 logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1769 RM_ERRORS += 1
1770
1771 ERRORS += RM_ERRORS
1772
1773 IMP_ERRORS = 0
1774 if EXP_ERRORS == 0 and RM_ERRORS == 0:
1775 print("Test pg import")
1776 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1777 dir = os.path.join(TESTDIR, osd)
1778 PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
1779 for pg in PGS:
1780 file = os.path.join(dir, pg)
b32b8144 1781 # Make sure this doesn't crash
11fdf7f2 1782 cmd = (CFSD_PREFIX + "--op dump-export --file {file}").format(osd=osd, file=file)
b32b8144
FG
1783 logging.debug(cmd)
1784 ret = call(cmd, shell=True, stdout=nullfd)
1785 if ret != 0:
11fdf7f2 1786 logging.error("Dump-export failed from {file} with {ret}".format(file=file, ret=ret))
b32b8144 1787 IMP_ERRORS += 1
7c673cae
FG
1788 # This should do nothing
1789 cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file)
1790 logging.debug(cmd)
1791 ret = call(cmd, shell=True, stdout=nullfd)
1792 if ret != 0:
1793 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1794 IMP_ERRORS += 1
1795 if pg == PGS[0]:
1796 cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
1797 elif pg == PGS[1]:
1798 cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
1799 else:
1800 cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
1801 logging.debug(cmd)
1802 ret = call(cmd, shell=True, stdout=nullfd)
1803 if ret != 0:
1804 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1805 IMP_ERRORS += 1
1806 else:
1807 logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
1808
1809 ERRORS += IMP_ERRORS
1810 logging.debug(cmd)
1811
1812 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1813 print("Verify replicated import data")
1814 data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
1815 ERRORS += data_errors
1816 else:
1817 logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
1818
1819 print("Test all --op dump-journal again")
1820 ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]
1821 ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS)
1822
1823 vstart(new=False)
1824 wait_for_health()
1825
1826 if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
1827 print("Verify erasure coded import data")
1828 ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db)
1829 # Check replicated data/xattr/omap using rados
1830 print("Verify replicated import data using rados")
1831 ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db)
1832
1833 if EXP_ERRORS == 0:
1834 NEWPOOL = "rados-import-pool"
11fdf7f2 1835 cmd = "{path}/ceph osd pool create {pool} 8".format(pool=NEWPOOL, path=CEPH_BIN)
7c673cae
FG
1836 logging.debug(cmd)
1837 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1838
1839 print("Test rados import")
1840 first = True
1841 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1842 dir = os.path.join(TESTDIR, osd)
1843 for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]:
1844 if pg.find("{id}.".format(id=REPID)) != 0:
1845 continue
1846 file = os.path.join(dir, pg)
1847 if first:
1848 first = False
1849 # This should do nothing
1850 cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1851 logging.debug(cmd)
1852 ret = call(cmd, shell=True, stdout=nullfd)
1853 if ret != 0:
1854 logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
1855 ERRORS += 1
1856 cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
1857 logging.debug(cmd)
1858 data = check_output(cmd, shell=True)
1859 if data:
1860 logging.error("'{data}'".format(data=data))
1861 logging.error("Found objects after dry-run")
1862 ERRORS += 1
1863 cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1864 logging.debug(cmd)
1865 ret = call(cmd, shell=True, stdout=nullfd)
1866 if ret != 0:
1867 logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
1868 ERRORS += 1
1869 cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
1870 logging.debug(cmd)
1871 ret = call(cmd, shell=True, stdout=nullfd)
1872 if ret != 0:
1873 logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret))
1874 ERRORS += 1
1875
1876 ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db)
1877 else:
1878 logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
1879
1880 # Clear directories of previous portion
1881 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
1882 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
1883 os.mkdir(TESTDIR)
1884 os.mkdir(DATADIR)
1885
1886 # Cause SPLIT_POOL to split and test import with object/log filtering
1887 print("Testing import all objects after a split")
1888 SPLIT_POOL = "split_pool"
1889 PG_COUNT = 1
1890 SPLIT_OBJ_COUNT = 5
1891 SPLIT_NSPACE_COUNT = 2
1892 SPLIT_NAME = "split"
1893 cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
1894 logging.debug(cmd)
1895 call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1896 SPLITID = get_pool_id(SPLIT_POOL, nullfd)
1897 pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
1898 EXP_ERRORS = 0
1899 RM_ERRORS = 0
1900 IMP_ERRORS = 0
1901
1902 objects = range(1, SPLIT_OBJ_COUNT + 1)
1903 nspaces = range(SPLIT_NSPACE_COUNT)
1904 for n in nspaces:
1905 nspace = get_nspace(n)
1906
1907 for i in objects:
1908 NAME = SPLIT_NAME + "{num}".format(num=i)
1909 LNAME = nspace + "-" + NAME
1910 DDNAME = os.path.join(DATADIR, LNAME)
1911 DDNAME += "__head"
1912
1913 cmd = "rm -f " + DDNAME
1914 logging.debug(cmd)
1915 call(cmd, shell=True)
1916
1917 if i == 1:
1918 dataline = range(DATALINECOUNT)
1919 else:
1920 dataline = range(1)
1921 fd = open(DDNAME, "w")
1922 data = "This is the split data for " + LNAME + "\n"
1923 for _ in dataline:
1924 fd.write(data)
1925 fd.close()
1926
1927 cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
1928 logging.debug(cmd)
1929 ret = call(cmd, shell=True, stderr=nullfd)
1930 if ret != 0:
1931 logging.critical("Rados put command failed with {ret}".format(ret=ret))
1932 return 1
1933
1934 wait_for_health()
1935 kill_daemons()
1936
1937 for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]:
1938 os.mkdir(os.path.join(TESTDIR, osd))
1939
1940 pg = "{pool}.0".format(pool=SPLITID)
1941 EXPORT_PG = pg
1942
1943 export_osds = get_osds(pg, OSDDIR)
1944 for osd in export_osds:
1945 mydir = os.path.join(TESTDIR, osd)
1946 fname = os.path.join(mydir, pg)
1947 cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
1948 logging.debug(cmd)
1949 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1950 if ret != 0:
1951 logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
1952 EXP_ERRORS += 1
1953
1954 ERRORS += EXP_ERRORS
1955
1956 if EXP_ERRORS == 0:
1957 vstart(new=False)
1958 wait_for_health()
1959
1960 cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
1961 logging.debug(cmd)
1962 ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
1963 time.sleep(5)
1964 wait_for_health()
1965
1966 kill_daemons()
1967
1968 # Now 2 PGs, poolid.0 and poolid.1
11fdf7f2
TL
1969 # make note of pgs before we remove the pgs...
1970 osds = get_osds("{pool}.0".format(pool=SPLITID), OSDDIR);
7c673cae
FG
1971 for seed in range(2):
1972 pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
1973
11fdf7f2 1974 for osd in osds:
3efd9988 1975 cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd)
7c673cae
FG
1976 logging.debug(cmd)
1977 ret = call(cmd, shell=True, stdout=nullfd)
1978
11fdf7f2
TL
1979 which = 0
1980 for osd in osds:
1981 # This is weird. The export files are based on only the EXPORT_PG
1982 # and where that pg was before the split. Use 'which' to use all
1983 # export copies in import.
1984 mydir = os.path.join(TESTDIR, export_osds[which])
1985 fname = os.path.join(mydir, EXPORT_PG)
1986 which += 1
1987 cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=EXPORT_PG, file=fname)
1988 logging.debug(cmd)
1989 ret = call(cmd, shell=True, stdout=nullfd)
1990 if ret != 0:
1991 logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
1992 IMP_ERRORS += 1
7c673cae
FG
1993
1994 ERRORS += IMP_ERRORS
1995
1996 # Start up again to make sure imports didn't corrupt anything
1997 if IMP_ERRORS == 0:
1998 print("Verify split import data")
1999 data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
2000 ERRORS += data_errors
2001 if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
2002 logging.error("Incorrect number of replicas seen {count}".format(count=count))
2003 ERRORS += 1
2004 vstart(new=False)
2005 wait_for_health()
2006
2007 call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
2008 call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
2009
2010 ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS)
2011
2012 # vstart() starts 4 OSDs
2013 ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS)
2014 ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0])
3efd9988
FG
2015
2016 kill_daemons()
2017 CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")]
2018 if CORES:
2019 CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid()))
2020 os.mkdir(CORE_DIR)
2021 call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True)
2022 logging.error("Failure due to cores found")
2023 logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR))
2024 ERRORS += len(CORES)
2025
7c673cae
FG
2026 if ERRORS == 0:
2027 print("TEST PASSED")
2028 return 0
2029 else:
2030 print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
2031 return 1
2032
2033
def remove_btrfs_subvolumes(path):
    """Delete any btrfs subvolumes living under *path*.

    On btrfs, OSD data directories may have been created as subvolumes,
    which a plain "rm -rf" cannot remove, so they must be deleted with
    "sudo btrfs subvolume delete" before the test directory is cleaned up.
    No-op on FreeBSD (no btrfs, and "stat -f" has different semantics
    there) or when *path* is not on a btrfs filesystem.

    :param path: directory whose filesystem is probed and whose
                 subvolumes (if btrfs) are removed
    """
    if platform.system() == "FreeBSD":
        return
    # 'stat -f -c %T' prints the filesystem type name of path.
    # ('%%T' survives the %-formatting below as a literal '%T'.)
    stat_proc = subprocess.Popen("stat -f -c '%%T' %s" % path,
                                 shell=True, stdout=subprocess.PIPE)
    for line in stat_proc.stdout:
        filesystem = decode(line).rstrip('\n')
        if filesystem == "btrfs":
            list_proc = subprocess.Popen("sudo btrfs subvolume list %s" % path,
                                         shell=True, stdout=subprocess.PIPE)
            for subvol_line in list_proc.stdout:
                # The 9th column of 'btrfs subvolume list' output is the
                # subvolume path.
                subvolume = decode(subvol_line).split()[8]
                # extract the portion of the subvolume path relative to
                # (and including) 'path'
                m = re.search(".*(%s.*)" % path, subvolume)
                if m:
                    found = m.group(1)
                    call("sudo btrfs subvolume delete %s" % found, shell=True)
            # Reap the child so it does not linger as a zombie.
            list_proc.wait()
    # The original code never waited on this process (and the inner Popen
    # even rebound its variable), leaking pipes and zombie children.
    stat_proc.wait()
2049
2050
if __name__ == "__main__":
    # Default to failure so that an exception escaping main() still
    # yields a non-zero exit code.
    status = 1
    try:
        status = main(sys.argv[1:])
    finally:
        # Best-effort teardown: stop daemons and remove the test cluster
        # directory regardless of how main() exited.
        kill_daemons()
        os.chdir(CEPH_BUILD_DIR)
        remove_btrfs_subvolumes(CEPH_DIR)
        call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
    # NOTE: sys.exit() used to live inside the finally block, where the
    # SystemExit it raises replaced any in-flight exception from main()
    # and hid its traceback.  Exiting after the finally block preserves
    # tracebacks; an unhandled exception still terminates the process
    # with exit code 1, so the observable exit status is unchanged.
    sys.exit(status)