1 | #!/usr/bin/env python |
2 | ||
3 | from __future__ import print_function | |
4 | from subprocess import call | |
5 | try: | |
6 | from subprocess import check_output | |
7 | except ImportError: | |
8 | def check_output(*popenargs, **kwargs): | |
9 | import subprocess | |
10 | # backported from python 2.7 stdlib | |
11 | process = subprocess.Popen( | |
12 | stdout=subprocess.PIPE, *popenargs, **kwargs) | |
13 | output, unused_err = process.communicate() | |
14 | retcode = process.poll() | |
15 | if retcode: | |
16 | cmd = kwargs.get("args") | |
17 | if cmd is None: | |
18 | cmd = popenargs[0] | |
19 | error = subprocess.CalledProcessError(retcode, cmd) | |
20 | error.output = output | |
21 | raise error | |
22 | return output | |
23 | ||
24 | import filecmp | |
25 | import os | |
26 | import subprocess | |
27 | import math | |
28 | import time | |
29 | import sys | |
30 | import re | |
31 | import logging | |
32 | import json | |
33 | import tempfile | |
34 | import platform | |
35 | ||
36 | try: | |
37 | from subprocess import DEVNULL | |
38 | except ImportError: | |
39 | DEVNULL = open(os.devnull, "wb") | |
40 | ||
41 | logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING) | |
42 | ||
43 | ||
44 | if sys.version_info[0] >= 3: | |
45 | def decode(s): | |
46 | return s.decode('utf-8') | |
47 | ||
48 | def check_output(*args, **kwargs): | |
49 | return decode(subprocess.check_output(*args, **kwargs)) | |
50 | else: | |
51 | def decode(s): | |
52 | return s | |
53 | ||
54 | ||
55 | ||
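| # wait for 'ceph health' to report HEALTH_OK or HEALTH_WARN, polling once a second; give up after 150 tries | |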
56 | def wait_for_health(): | |
57 | print("Wait for health_ok...", end="") | |
58 | tries = 0 | |
59 | while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0: | |
60 | tries += 1 | |
61 | if tries == 150: | |
62 | raise Exception("Time exceeded to go to health") | |
63 | time.sleep(1) | |
64 | print("DONE") | |
65 | ||
66 | ||
67 | def get_pool_id(name, nullfd): | |
68 | cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split() | |
69 | # pool {pool} id # .... grab the 4th field | |
70 | return check_output(cmd, stderr=nullfd).split()[3] | |
71 | ||
72 | ||
73 | # return a list of unique PGS given an osd subdirectory | |
74 | def get_osd_pgs(SUBDIR, ID): | |
75 | PGS = [] | |
76 | if ID: | |
77 | endhead = re.compile("{id}.*_head$".format(id=ID)) | |
78 | DIR = os.path.join(SUBDIR, "current") | |
79 | PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))] | |
80 | PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p] | |
81 | return PGS | |
82 | ||
83 | ||
84 | # return a sorted list of unique PGs given a directory | |
85 | def get_pgs(DIR, ID): | |
86 | OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] | |
87 | PGS = [] | |
88 | for d in OSDS: | |
89 | SUBDIR = os.path.join(DIR, d) | |
90 | PGS += get_osd_pgs(SUBDIR, ID) | |
91 | return sorted(set(PGS)) | |
92 | ||
93 | ||
94 | # return a sorted list of PGs (a subset of ALLPGS) that contain objects with the specified prefix | |
95 | def get_objs(ALLPGS, prefix, DIR, ID): | |
96 | OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] | |
97 | PGS = [] | |
98 | for d in OSDS: | |
99 | DIRL2 = os.path.join(DIR, d) | |
100 | SUBDIR = os.path.join(DIRL2, "current") | |
101 | for p in ALLPGS: | |
102 | PGDIR = p + "_head" | |
103 | if not os.path.isdir(os.path.join(SUBDIR, PGDIR)): | |
104 | continue | |
105 | FINALDIR = os.path.join(SUBDIR, PGDIR) | |
106 | # See if there are any objects there | |
107 | if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)): | |
108 | PGS += [p] | |
109 | return sorted(set(PGS)) | |
110 | ||
111 | ||
112 | # return a sorted list of OSDS which have data from a given PG | |
113 | def get_osds(PG, DIR): | |
114 | ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] | |
115 | OSDS = [] | |
116 | for d in ALLOSDS: | |
117 | DIRL2 = os.path.join(DIR, d) | |
118 | SUBDIR = os.path.join(DIRL2, "current") | |
119 | PGDIR = PG + "_head" | |
120 | if not os.path.isdir(os.path.join(SUBDIR, PGDIR)): | |
121 | continue | |
122 | OSDS += [d] | |
123 | return sorted(OSDS) | |
124 | ||
125 | ||
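| # return the non-empty lines of filename (reading stops at the first blank line), then delete the file | |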
126 | def get_lines(filename): | |
127 | tmpfd = open(filename, "r") | |
128 | line = True | |
129 | lines = [] | |
130 | while line: | |
131 | line = tmpfd.readline().rstrip('\n') | |
132 | if line: | |
133 | lines += [line] | |
134 | tmpfd.close() | |
135 | os.unlink(filename) | |
136 | return lines | |
137 | ||
138 | ||
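| # print the contents of filename, but only if the requested level is at or above the logger's effective level | |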
139 | def cat_file(level, filename): | |
140 | if level < logging.getLogger().getEffectiveLevel(): | |
141 | return | |
142 | print("File: " + filename) | |
143 | with open(filename, "r") as f: | |
144 | while True: | |
145 | line = f.readline().rstrip('\n') | |
146 | if not line: | |
147 | break | |
148 | print(line) | |
149 | print("<EOF>") | |
150 | ||
151 | ||
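| # start a fresh cluster (-n) or restart the existing one (-N) via vstart.sh: 1 mon, 4 osds, 1 mgr, no mds | |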
152 | def vstart(new, opt=""): | |
153 | print("vstarting....", end="") | |
154 | NEW = new and "-n" or "-N" | |
155 | call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path}/src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True) | |
156 | print("DONE") | |
157 | ||
158 | ||
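| # run cmd expecting it to fail with errmsg on stderr; return 0 on the expected failure, 1 otherwise | |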
159 | def test_failure(cmd, errmsg, tty=False): | |
160 | if tty: | |
161 | try: | |
162 | ttyfd = open("/dev/tty", "r+b") | |
163 | except Exception as e: | |
164 | logging.info(str(e)) | |
165 | logging.info("SKIP " + cmd) | |
166 | return 0 | |
167 | TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) | |
168 | tmpfd = open(TMPFILE, "wb") | |
169 | ||
170 | logging.debug(cmd) | |
171 | if tty: | |
172 | ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd) | |
173 | ttyfd.close() | |
174 | else: | |
175 | ret = call(cmd, shell=True, stderr=tmpfd) | |
176 | tmpfd.close() | |
177 | if ret == 0: | |
178 | logging.error(cmd) | |
179 | logging.error("Should have failed, but got exit 0") | |
180 | return 1 | |
181 | lines = get_lines(TMPFILE) | |
182 | matched = [ l for l in lines if errmsg in l ] | |
183 | if any(matched): | |
184 | logging.info("Correctly failed with message \"" + matched[0] + "\"") | |
185 | return 0 | |
186 | else: | |
187 | logging.error("Command: " + cmd ) | |
188 | logging.error("Bad messages to stderr \"" + str(lines) + "\"") | |
189 | logging.error("Expected \"" + errmsg + "\"") | |
190 | return 1 | |
191 | ||
192 | ||
193 | def get_nspace(num): | |
194 | if num == 0: | |
195 | return "" | |
196 | return "ns{num}".format(num=num) | |
197 | ||
198 | ||
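| # re-read every head object with the given prefix via rados and compare data, xattrs, omap header and omap values against db; return the error count | |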
199 | def verify(DATADIR, POOL, NAME_PREFIX, db): | |
200 | TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) | |
201 | ERRORS = 0 | |
202 | for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]: | |
203 | nsfile = rawnsfile.split("__")[0] | |
204 | clone = rawnsfile.split("__")[1] | |
205 | nspace = nsfile.split("-")[0] | |
206 | file = nsfile.split("-")[1] | |
207 | # Skip clones | |
208 | if clone != "head": | |
209 | continue | |
210 | path = os.path.join(DATADIR, rawnsfile) | |
211 | try: | |
212 | os.unlink(TMPFILE) | |
213 | except: | |
214 | pass | |
215 | cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN) | |
216 | logging.debug(cmd) | |
217 | call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL) | |
218 | cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE) | |
219 | logging.debug(cmd) | |
220 | ret = call(cmd, shell=True) | |
221 | if ret != 0: | |
222 | logging.error("{file} data not imported properly".format(file=file)) | |
223 | ERRORS += 1 | |
224 | try: | |
225 | os.unlink(TMPFILE) | |
226 | except: | |
227 | pass | |
228 | for key, val in db[nspace][file]["xattr"].items(): | |
229 | cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN) | |
230 | logging.debug(cmd) | |
231 | getval = check_output(cmd, shell=True, stderr=DEVNULL) | |
232 | logging.debug("getxattr {key} {val}".format(key=key, val=getval)) | |
233 | if getval != val: | |
234 | logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val)) | |
235 | ERRORS += 1 | |
236 | continue | |
237 | hdr = db[nspace][file].get("omapheader", "") | |
238 | cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN) | |
239 | logging.debug(cmd) | |
240 | ret = call(cmd, shell=True, stderr=DEVNULL) | |
241 | if ret != 0: | |
242 | logging.error("rados getomapheader returned {ret}".format(ret=ret)) | |
243 | ERRORS += 1 | |
244 | else: | |
245 | getlines = get_lines(TMPFILE) | |
246 | assert(len(getlines) == 0 or len(getlines) == 1) | |
247 | if len(getlines) == 0: | |
248 | gethdr = "" | |
249 | else: | |
250 | gethdr = getlines[0] | |
251 | logging.debug("header: {hdr}".format(hdr=gethdr)) | |
252 | if gethdr != hdr: | |
253 | logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr)) | |
254 | ERRORS += 1 | |
255 | for key, val in db[nspace][file]["omap"].items(): | |
256 | cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN) | |
257 | logging.debug(cmd) | |
258 | ret = call(cmd, shell=True, stderr=DEVNULL) | |
259 | if ret != 0: | |
260 | logging.error("getomapval returned {ret}".format(ret=ret)) | |
261 | ERRORS += 1 | |
262 | continue | |
263 | getlines = get_lines(TMPFILE) | |
264 | if len(getlines) != 1: | |
265 | logging.error("Bad data from getomapval {lines}".format(lines=getlines)) | |
266 | ERRORS += 1 | |
267 | continue | |
268 | getval = getlines[0] | |
269 | logging.debug("getomapval {key} {val}".format(key=key, val=getval)) | |
270 | if getval != val: | |
271 | logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val)) | |
272 | ERRORS += 1 | |
273 | try: | |
274 | os.unlink(TMPFILE) | |
275 | except: | |
276 | pass | |
277 | return ERRORS | |
278 | ||
279 | ||
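| # sanity-check the structure of '--op dump-journal' JSON output: header, max_size and entries | |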
280 | def check_journal(jsondict): | |
281 | errors = 0 | |
282 | if 'header' not in jsondict: | |
283 | logging.error("Key 'header' not in dump-journal") | |
284 | errors += 1 | |
285 | elif 'max_size' not in jsondict['header']: | |
286 | logging.error("Key 'max_size' not in dump-journal header") | |
287 | errors += 1 | |
288 | else: | |
289 | print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size'])) | |
290 | if 'entries' not in jsondict: | |
291 | logging.error("Key 'entries' not in dump-journal output") | |
292 | errors += 1 | |
293 | elif len(jsondict['entries']) == 0: | |
294 | logging.info("No entries in journal found") | |
295 | else: | |
296 | errors += check_journal_entries(jsondict['entries']) | |
297 | return errors | |
298 | ||
299 | ||
300 | def check_journal_entries(entries): | |
301 | errors = 0 | |
302 | for enum in range(len(entries)): | |
303 | if 'offset' not in entries[enum]: | |
304 | logging.error("No 'offset' key in entry {e}".format(e=enum)) | |
305 | errors += 1 | |
306 | if 'seq' not in entries[enum]: | |
307 | logging.error("No 'seq' key in entry {e}".format(e=enum)) | |
308 | errors += 1 | |
309 | if 'transactions' not in entries[enum]: | |
310 | logging.error("No 'transactions' key in entry {e}".format(e=enum)) | |
311 | errors += 1 | |
312 | elif len(entries[enum]['transactions']) == 0: | |
313 | logging.error("No transactions found in entry {e}".format(e=enum)) | |
314 | errors += 1 | |
315 | else: | |
316 | errors += check_entry_transactions(entries[enum], enum) | |
317 | return errors | |
318 | ||
319 | ||
320 | def check_entry_transactions(entry, enum): | |
321 | errors = 0 | |
322 | for tnum in range(len(entry['transactions'])): | |
323 | if 'trans_num' not in entry['transactions'][tnum]: | |
324 | logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum)) | |
325 | errors += 1 | |
326 | elif entry['transactions'][tnum]['trans_num'] != tnum: | |
327 | ft = entry['transactions'][tnum]['trans_num'] | |
328 | logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum)) | |
329 | errors += 1 | |
330 | if 'ops' not in entry['transactions'][tnum]: | |
331 | logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum)) | |
332 | errors += 1 | |
333 | else: | |
334 | errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum) | |
335 | return errors | |
336 | ||
337 | ||
338 | def check_transaction_ops(ops, enum, tnum): | |
339 | if len(ops) == 0: | |
340 | logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum)) | |
341 | errors = 0 | |
342 | for onum in range(len(ops)): | |
343 | if 'op_num' not in ops[onum]: | |
344 | logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum)) | |
345 | errors += 1 | |
346 | elif ops[onum]['op_num'] != onum: | |
347 | fo = ops[onum]['op_num'] | |
348 | logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum)) | |
349 | errors += 1 | |
350 | if 'op_name' not in ops[onum]: | |
351 | logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum)) | |
352 | errors += 1 | |
353 | return errors | |
354 | ||
355 | ||
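| # dump each osd's journal as JSON and validate its structure; return the error count | |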
356 | def test_dump_journal(CFSD_PREFIX, osds): | |
357 | ERRORS = 0 | |
358 | pid = os.getpid() | |
359 | TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid) | |
360 | ||
361 | for osd in osds: | |
362 | # Test --op dump-journal by loading json | |
363 | cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd) | |
364 | logging.debug(cmd) | |
365 | tmpfd = open(TMPFILE, "wb") | |
366 | ret = call(cmd, shell=True, stdout=tmpfd) | |
367 | if ret != 0: | |
368 | logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) | |
369 | ERRORS += 1 | |
370 | continue | |
371 | tmpfd.close() | |
372 | tmpfd = open(TMPFILE, "r") | |
373 | jsondict = json.load(tmpfd) | |
374 | tmpfd.close() | |
375 | os.unlink(TMPFILE) | |
376 | ||
377 | journal_errors = check_journal(jsondict) | |
378 | if journal_errors != 0: | |
379 | logging.error(jsondict) | |
380 | ERRORS += journal_errors | |
381 | ||
382 | return ERRORS | |
383 | ||
384 | CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR') | |
385 | CEPH_BIN = os.environ.get('CEPH_BIN') | |
386 | CEPH_ROOT = os.environ.get('CEPH_ROOT') | |
387 | ||
388 | if not CEPH_BUILD_DIR: | |
389 | CEPH_BUILD_DIR=os.getcwd() | |
390 | os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR) | |
391 | CEPH_BIN=CEPH_BUILD_DIR | |
392 | os.putenv('CEPH_BIN', CEPH_BIN) | |
393 | CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR) | |
394 | os.putenv('CEPH_ROOT', CEPH_ROOT) | |
395 | CEPH_LIB=os.path.join(CEPH_BIN, '.libs') | |
396 | os.putenv('CEPH_LIB', CEPH_LIB) | |
397 | ||
398 | CEPH_DIR = CEPH_BUILD_DIR + "/cot_dir" | |
399 | CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf') | |
400 | ||
401 | def kill_daemons(): | |
402 | call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True) | |
403 | ||
404 | ||
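| # diff every head data file against the matching object files found under OSDDIR; return (error count, number of replicas found) | |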
405 | def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME): | |
406 | repcount = 0 | |
407 | ERRORS = 0 | |
408 | for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]: | |
409 | nsfile = rawnsfile.split("__")[0] | |
410 | clone = rawnsfile.split("__")[1] | |
411 | nspace = nsfile.split("-")[0] | |
412 | file = nsfile.split("-")[1] + "__" + clone | |
413 | # Skip clones | |
414 | if clone != "head": | |
415 | continue | |
416 | path = os.path.join(DATADIR, rawnsfile) | |
417 | tmpfd = open(TMPFILE, "wb") | |
418 | cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace) | |
419 | logging.debug(cmd) | |
420 | ret = call(cmd, shell=True, stdout=tmpfd) | |
421 | if ret: | |
422 | logging.critical("INTERNAL ERROR") | |
423 | return 1 | |
424 | tmpfd.close() | |
425 | obj_locs = get_lines(TMPFILE) | |
426 | if len(obj_locs) == 0: | |
427 | logging.error("Can't find imported object {name}".format(name=file)) | |
428 | ERRORS += 1 | |
429 | for obj_loc in obj_locs: | |
430 | # For btrfs skip snap_* dirs | |
431 | if re.search("/snap_[0-9]*/", obj_loc) is not None: | |
432 | continue | |
433 | repcount += 1 | |
434 | cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc) | |
435 | logging.debug(cmd) | |
436 | ret = call(cmd, shell=True) | |
437 | if ret != 0: | |
438 | logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc)) | |
439 | ERRORS += 1 | |
440 | return ERRORS, repcount | |
441 | ||
442 | ||
443 | def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight): | |
444 | # change the weight of the given osds to 'weight' in the newest osdmap stored on the osd at osd_path | |
445 | osdmap_file = tempfile.NamedTemporaryFile(delete=True) | |
446 | cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path, | |
447 | osdmap_file=osdmap_file.name) | |
448 | output = check_output(cmd, shell=True) | |
449 | epoch = int(re.findall('#(\d+)', output)[0]) | |
450 | ||
451 | new_crush_file = tempfile.NamedTemporaryFile(delete=True) | |
452 | old_crush_file = tempfile.NamedTemporaryFile(delete=True) | |
453 | ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, | |
454 | crush_file=old_crush_file.name, path=CEPH_BIN), | |
455 | stdout=DEVNULL, | |
456 | stderr=DEVNULL, | |
457 | shell=True) | |
458 | assert(ret == 0) | |
459 | ||
460 | for osd_id in osd_ids: | |
461 | cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id, | |
462 | crush_file=old_crush_file.name, | |
463 | weight=weight, | |
464 | new_crush_file=new_crush_file.name, path=CEPH_BIN) | |
465 | ret = call(cmd, stdout=DEVNULL, shell=True) | |
466 | assert(ret == 0) | |
467 | old_crush_file, new_crush_file = new_crush_file, old_crush_file | |
468 | ||
469 | # change them back, since we don't need to prepare for another round | |
470 | old_crush_file, new_crush_file = new_crush_file, old_crush_file | |
471 | old_crush_file.close() | |
472 | ||
473 | ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, | |
474 | crush_file=new_crush_file.name, path=CEPH_BIN), | |
475 | stdout=DEVNULL, | |
476 | stderr=DEVNULL, | |
477 | shell=True) | |
478 | assert(ret == 0) | |
479 | ||
480 | # Minimal test of --dry-run: just run it, without checking the result | |
481 | cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run" | |
482 | cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch) | |
483 | ret = call(cmd, stdout=DEVNULL, shell=True) | |
484 | assert(ret == 0) | |
485 | ||
486 | # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool | |
487 | # to use a different epoch than the one in osdmap | |
488 | cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force" | |
489 | cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch) | |
490 | ret = call(cmd, stdout=DEVNULL, shell=True) | |
491 | ||
492 | return ret == 0 | |
493 | ||
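| # export the newest osdmap's crush map from the given osd and return the crush item weights of the osds (read from the last len(osd_ids) lines of 'crushtool --tree'), or None if the osdmap cannot be read | |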
494 | def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path): | |
495 | osdmap_file = tempfile.NamedTemporaryFile(delete=True) | |
496 | cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path, | |
497 | osdmap_file=osdmap_file.name) | |
498 | ret = call(cmd, stdout=DEVNULL, shell=True) | |
499 | if ret != 0: | |
500 | return None | |
501 | # we have to read the weights from the crush map; we could query them using | |
502 | # osdmaptool as well, but keep in mind they are different: | |
503 | # item weights in the crush map versus the weight associated with each osd in the osdmap | |
504 | crush_file = tempfile.NamedTemporaryFile(delete=True) | |
505 | ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, | |
506 | crush_file=crush_file.name, path=CEPH_BIN), | |
507 | stdout=DEVNULL, | |
508 | shell=True) | |
509 | assert(ret == 0) | |
510 | output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name, | |
511 | num_osd=len(osd_ids), path=CEPH_BIN), | |
512 | stderr=DEVNULL, | |
513 | shell=True) | |
514 | weights = [] | |
515 | for line in output.strip().split('\n'): | |
516 | osd_id, weight, osd_name = re.split('\s+', line) | |
517 | weights.append(float(weight)) | |
518 | ||
519 | return weights | |
520 | ||
521 | ||
522 | def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths): | |
523 | print("Testing get-osdmap and set-osdmap") | |
524 | errors = 0 | |
525 | kill_daemons() | |
526 | weight = 1 / math.e # just some magic number in [0, 1] | |
527 | changed = [] | |
528 | for osd_path in osd_paths: | |
529 | if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight): | |
530 | changed.append(osd_path) | |
531 | else: | |
532 | logging.warning("Failed to change the weights: {0}".format(osd_path)) | |
533 | # it is an error if none of the stores gets changed | |
534 | if not changed: | |
535 | errors += 1 | |
536 | ||
537 | for osd_path in changed: | |
538 | weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path) | |
539 | if not weights: | |
540 | errors += 1 | |
541 | continue | |
542 | if any(abs(w - weight) > 1e-5 for w in weights): | |
543 | logging.warning("Weight is not changed: {0} != {1}".format(weights, weight)) | |
544 | errors += 1 | |
545 | return errors | |
546 | ||
547 | def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path): | |
548 | # incrementals are not used unless we need to build an MOSDMap to update | |
549 | # an OSD's peers, so an obvious way to test it is to simply overwrite an epoch | |
550 | # with a different copy, and read it back to see if it matches. | |
551 | kill_daemons() | |
552 | file_e2 = tempfile.NamedTemporaryFile(delete=True) | |
553 | cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path, | |
554 | file=file_e2.name) | |
555 | output = check_output(cmd, shell=True) | |
556 | epoch = int(re.findall('#(\d+)', output)[0]) | |
557 | # backup e1 incremental before overwriting it | |
558 | epoch -= 1 | |
559 | file_e1_backup = tempfile.NamedTemporaryFile(delete=True) | |
560 | cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}" | |
561 | ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) | |
562 | if ret: return 1 | |
563 | # overwrite e1 with e2 | |
564 | cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}" | |
565 | ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True) | |
566 | if ret: return 1 | |
567 | # Try to set back to e1 with --dry-run; the map should not actually be changed | |
568 | cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}" | |
569 | ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) | |
570 | if ret: return 1 | |
571 | # read from e1 | |
572 | file_e1_read = tempfile.NamedTemporaryFile(delete=True) | |
573 | cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}" | |
574 | ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True) | |
575 | if ret: return 1 | |
576 | errors = 0 | |
577 | try: | |
578 | if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False): | |
579 | logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name)) | |
580 | errors += 1 | |
581 | finally: | |
582 | # revert the change with file_e1_backup | |
583 | cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}" | |
584 | ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) | |
585 | if ret: | |
586 | logging.error("Failed to revert the changed inc-osdmap") | |
587 | errors += 1 | |
588 | ||
589 | return errors | |
590 | ||
591 | ||
592 | def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS): | |
593 | # Test removeall | |
594 | TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) | |
595 | nullfd = open(os.devnull, "w") | |
596 | errors=0 | |
597 | print("Test removeall") | |
598 | kill_daemons() | |
599 | for nspace in db.keys(): | |
600 | for basename in db[nspace].keys(): | |
601 | JSON = db[nspace][basename]['json'] | |
602 | for pg in OBJREPPGS: | |
603 | OSDS = get_osds(pg, OSDDIR) | |
604 | for osd in OSDS: | |
605 | DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) | |
606 | fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) | |
607 | and f.split("_")[0] == basename and f.split("_")[4] == nspace] | |
608 | if not fnames: | |
609 | continue | |
610 | ||
611 | if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS): | |
612 | cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON) | |
613 | errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything") | |
614 | ||
615 | cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, json=JSON) | |
616 | logging.debug(cmd) | |
617 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
618 | if ret != 0: | |
619 | logging.error("remove with --force failed for {json}".format(json=JSON)) | |
620 | errors += 1 | |
621 | ||
622 | cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON) | |
623 | logging.debug(cmd) | |
624 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
625 | if ret != 0: | |
626 | logging.error("removeall failed for {json}".format(json=JSON)) | |
627 | errors += 1 | |
628 | ||
629 | cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON) | |
630 | logging.debug(cmd) | |
631 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
632 | if ret != 0: | |
633 | logging.error("removeall failed for {json}".format(json=JSON)) | |
634 | errors += 1 | |
635 | ||
636 | tmpfd = open(TMPFILE, "w") | |
637 | cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename) | |
638 | logging.debug(cmd) | |
639 | ret = call(cmd, shell=True, stdout=tmpfd) | |
640 | if ret != 0: | |
641 | logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) | |
642 | errors += 1 | |
643 | tmpfd.close() | |
644 | lines = get_lines(TMPFILE) | |
645 | if len(lines) != 0: | |
646 | logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines)) | |
647 | errors += 1 | |
648 | vstart(new=False) | |
649 | wait_for_health() | |
650 | cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN) | |
651 | logging.debug(cmd) | |
652 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
653 | if ret != 0: | |
654 | logging.error("rados rmsnap failed") | |
655 | errors += 1 | |
656 | time.sleep(2) | |
657 | wait_for_health() | |
658 | return errors | |
659 | ||
660 | ||
661 | def main(argv): | |
662 | if sys.version_info[0] < 3: | |
663 | sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0) | |
664 | else: | |
665 | stdout = sys.stdout.buffer | |
666 | if len(argv) > 1 and argv[1] == "debug": | |
667 | nullfd = stdout | |
668 | else: | |
669 | nullfd = DEVNULL | |
670 | ||
671 | call("rm -fr {dir}; mkdir {dir}".format(dir=CEPH_DIR), shell=True) | |
672 | os.environ["CEPH_DIR"] = CEPH_DIR | |
673 | OSDDIR = os.path.join(CEPH_DIR, "dev") | |
674 | REP_POOL = "rep_pool" | |
675 | REP_NAME = "REPobject" | |
676 | EC_POOL = "ec_pool" | |
677 | EC_NAME = "ECobject" | |
678 | if len(argv) > 0 and argv[0] == 'large': | |
679 | PG_COUNT = 12 | |
680 | NUM_REP_OBJECTS = 800 | |
681 | NUM_CLONED_REP_OBJECTS = 100 | |
682 | NUM_EC_OBJECTS = 12 | |
683 | NUM_NSPACES = 4 | |
684 | # Larger data sets for first object per namespace | |
685 | DATALINECOUNT = 50000 | |
686 | # Number of objects to do xattr/omap testing on | |
687 | ATTR_OBJS = 10 | |
688 | else: | |
689 | PG_COUNT = 4 | |
690 | NUM_REP_OBJECTS = 2 | |
691 | NUM_CLONED_REP_OBJECTS = 2 | |
692 | NUM_EC_OBJECTS = 2 | |
693 | NUM_NSPACES = 2 | |
694 | # Larger data sets for first object per namespace | |
695 | DATALINECOUNT = 10 | |
696 | # Number of objects to do xattr/omap testing on | |
697 | ATTR_OBJS = 2 | |
698 | ERRORS = 0 | |
699 | pid = os.getpid() | |
700 | TESTDIR = "/tmp/test.{pid}".format(pid=pid) | |
701 | DATADIR = "/tmp/data.{pid}".format(pid=pid) | |
702 | CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} " | |
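| # the prefix is later formatted per call, e.g. (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=..., pg=...) | |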
703 | PROFNAME = "testecprofile" | |
704 | ||
705 | os.environ['CEPH_CONF'] = CEPH_CONF | |
706 | vstart(new=True) | |
707 | wait_for_health() | |
708 | ||
709 | cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN) | |
710 | logging.debug(cmd) | |
711 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
712 | REPID = get_pool_id(REP_POOL, nullfd) | |
713 | ||
714 | print("Created Replicated pool #{repid}".format(repid=REPID)) | |
715 | ||
224ce89b | 716 | cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN) |
7c673cae FG |
717 | logging.debug(cmd) |
718 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
719 | cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN) | |
720 | logging.debug(cmd) | |
721 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
722 | cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN) | |
723 | logging.debug(cmd) | |
724 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
725 | ECID = get_pool_id(EC_POOL, nullfd) | |
726 | ||
727 | print("Created Erasure coded pool #{ecid}".format(ecid=ECID)) | |
728 | ||
729 | print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES))) | |
730 | cmd = "mkdir -p {datadir}".format(datadir=DATADIR) | |
731 | logging.debug(cmd) | |
732 | call(cmd, shell=True) | |
733 | ||
734 | db = {} | |
735 | ||
736 | objects = range(1, NUM_REP_OBJECTS + 1) | |
737 | nspaces = range(NUM_NSPACES) | |
738 | for n in nspaces: | |
739 | nspace = get_nspace(n) | |
740 | ||
741 | db[nspace] = {} | |
742 | ||
743 | for i in objects: | |
744 | NAME = REP_NAME + "{num}".format(num=i) | |
745 | LNAME = nspace + "-" + NAME | |
746 | DDNAME = os.path.join(DATADIR, LNAME) | |
747 | DDNAME += "__head" | |
748 | ||
749 | cmd = "rm -f " + DDNAME | |
750 | logging.debug(cmd) | |
751 | call(cmd, shell=True) | |
752 | ||
753 | if i == 1: | |
754 | dataline = range(DATALINECOUNT) | |
755 | else: | |
756 | dataline = range(1) | |
757 | fd = open(DDNAME, "w") | |
758 | data = "This is the replicated data for " + LNAME + "\n" | |
759 | for _ in dataline: | |
760 | fd.write(data) | |
761 | fd.close() | |
762 | ||
763 | cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) | |
764 | logging.debug(cmd) | |
765 | ret = call(cmd, shell=True, stderr=nullfd) | |
766 | if ret != 0: | |
767 | logging.critical("Rados put command failed with {ret}".format(ret=ret)) | |
768 | return 1 | |
769 | ||
770 | db[nspace][NAME] = {} | |
771 | ||
772 | if i < ATTR_OBJS + 1: | |
773 | keys = range(i) | |
774 | else: | |
775 | keys = range(0) | |
776 | db[nspace][NAME]["xattr"] = {} | |
777 | for k in keys: | |
778 | if k == 0: | |
779 | continue | |
780 | mykey = "key{i}-{k}".format(i=i, k=k) | |
781 | myval = "val{i}-{k}".format(i=i, k=k) | |
782 | cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) | |
783 | logging.debug(cmd) | |
784 | ret = call(cmd, shell=True) | |
785 | if ret != 0: | |
786 | logging.error("setxattr failed with {ret}".format(ret=ret)) | |
787 | ERRORS += 1 | |
788 | db[nspace][NAME]["xattr"][mykey] = myval | |
789 | ||
790 | # Create omap header in all objects but REPobject1 | |
791 | if i < ATTR_OBJS + 1 and i != 1: | |
792 | myhdr = "hdr{i}".format(i=i) | |
793 | cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN) | |
794 | logging.debug(cmd) | |
795 | ret = call(cmd, shell=True) | |
796 | if ret != 0: | |
797 | logging.critical("setomapheader failed with {ret}".format(ret=ret)) | |
798 | ERRORS += 1 | |
799 | db[nspace][NAME]["omapheader"] = myhdr | |
800 | ||
801 | db[nspace][NAME]["omap"] = {} | |
802 | for k in keys: | |
803 | if k == 0: | |
804 | continue | |
805 | mykey = "okey{i}-{k}".format(i=i, k=k) | |
806 | myval = "oval{i}-{k}".format(i=i, k=k) | |
807 | cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) | |
808 | logging.debug(cmd) | |
809 | ret = call(cmd, shell=True) | |
810 | if ret != 0: | |
811 | logging.critical("setomapval failed with {ret}".format(ret=ret)) | |
812 | db[nspace][NAME]["omap"][mykey] = myval | |
813 | ||
814 | # Create some clones | |
815 | cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN) | |
816 | logging.debug(cmd) | |
817 | call(cmd, shell=True) | |
818 | ||
819 | objects = range(1, NUM_CLONED_REP_OBJECTS + 1) | |
820 | nspaces = range(NUM_NSPACES) | |
821 | for n in nspaces: | |
822 | nspace = get_nspace(n) | |
823 | ||
824 | for i in objects: | |
825 | NAME = REP_NAME + "{num}".format(num=i) | |
826 | LNAME = nspace + "-" + NAME | |
827 | DDNAME = os.path.join(DATADIR, LNAME) | |
828 | # First clone | |
829 | CLONENAME = DDNAME + "__1" | |
830 | DDNAME += "__head" | |
831 | ||
832 | cmd = "mv -f " + DDNAME + " " + CLONENAME | |
833 | logging.debug(cmd) | |
834 | call(cmd, shell=True) | |
835 | ||
836 | if i == 1: | |
837 | dataline = range(DATALINECOUNT) | |
838 | else: | |
839 | dataline = range(1) | |
840 | fd = open(DDNAME, "w") | |
841 | data = "This is the replicated data after a snapshot for " + LNAME + "\n" | |
842 | for _ in dataline: | |
843 | fd.write(data) | |
844 | fd.close() | |
845 | ||
846 | cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) | |
847 | logging.debug(cmd) | |
848 | ret = call(cmd, shell=True, stderr=nullfd) | |
849 | if ret != 0: | |
850 | logging.critical("Rados put command failed with {ret}".format(ret=ret)) | |
851 | return 1 | |
852 | ||
853 | print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES))) | |
854 | ||
855 | objects = range(1, NUM_EC_OBJECTS + 1) | |
856 | nspaces = range(NUM_NSPACES) | |
857 | for n in nspaces: | |
858 | nspace = get_nspace(n) | |
859 | ||
860 | for i in objects: | |
861 | NAME = EC_NAME + "{num}".format(num=i) | |
862 | LNAME = nspace + "-" + NAME | |
863 | DDNAME = os.path.join(DATADIR, LNAME) | |
864 | DDNAME += "__head" | |
865 | ||
866 | cmd = "rm -f " + DDNAME | |
867 | logging.debug(cmd) | |
868 | call(cmd, shell=True) | |
869 | ||
870 | if i == 1: | |
871 | dataline = range(DATALINECOUNT) | |
872 | else: | |
873 | dataline = range(1) | |
874 | fd = open(DDNAME, "w") | |
875 | data = "This is the erasure coded data for " + LNAME + "\n" | |
876 | for j in dataline: | |
877 | fd.write(data) | |
878 | fd.close() | |
879 | ||
880 | cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) | |
881 | logging.debug(cmd) | |
882 | ret = call(cmd, shell=True, stderr=nullfd) | |
883 | if ret != 0: | |
884 | logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret)) | |
885 | return 1 | |
886 | ||
887 | db[nspace][NAME] = {} | |
888 | ||
889 | db[nspace][NAME]["xattr"] = {} | |
890 | if i < ATTR_OBJS + 1: | |
891 | keys = range(i) | |
892 | else: | |
893 | keys = range(0) | |
894 | for k in keys: | |
895 | if k == 0: | |
896 | continue | |
897 | mykey = "key{i}-{k}".format(i=i, k=k) | |
898 | myval = "val{i}-{k}".format(i=i, k=k) | |
899 | cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) | |
900 | logging.debug(cmd) | |
901 | ret = call(cmd, shell=True) | |
902 | if ret != 0: | |
903 | logging.error("setxattr failed with {ret}".format(ret=ret)) | |
904 | ERRORS += 1 | |
905 | db[nspace][NAME]["xattr"][mykey] = myval | |
906 | ||
907 | # Omap isn't supported in EC pools | |
908 | db[nspace][NAME]["omap"] = {} | |
909 | ||
910 | logging.debug(db) | |
911 | ||
912 | kill_daemons() | |
913 | ||
914 | if ERRORS: | |
915 | logging.critical("Unable to set up test") | |
916 | return 1 | |
917 | ||
918 | ALLREPPGS = get_pgs(OSDDIR, REPID) | |
919 | logging.debug(ALLREPPGS) | |
920 | ALLECPGS = get_pgs(OSDDIR, ECID) | |
921 | logging.debug(ALLECPGS) | |
922 | ||
923 | OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID) | |
924 | logging.debug(OBJREPPGS) | |
925 | OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID) | |
926 | logging.debug(OBJECPGS) | |
927 | ||
928 | ONEPG = ALLREPPGS[0] | |
929 | logging.debug(ONEPG) | |
930 | osds = get_osds(ONEPG, OSDDIR) | |
931 | ONEOSD = osds[0] | |
932 | logging.debug(ONEOSD) | |
933 | ||
934 | print("Test invalid parameters") | |
935 | # On export can't use stdout to a terminal | |
936 | cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG) | |
937 | ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True) | |
938 | ||
939 | # On export can't use stdout to a terminal | |
940 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG) | |
941 | ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True) | |
942 | ||
943 | # Prep a valid ec export file for import failure tests | |
944 | ONEECPG = ALLECPGS[0] | |
945 | osds = get_osds(ONEECPG, OSDDIR) | |
946 | ONEECOSD = osds[0] | |
947 | OTHERFILE = "/tmp/foo.{pid}".format(pid=pid) | |
948 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE) | |
949 | logging.debug(cmd) | |
950 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
951 | ||
952 | # On import can't specify a different shard | |
953 | BADPG = ONEECPG.split('s')[0] + "s10" | |
954 | cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=BADPG, file=OTHERFILE) | |
955 | ERRORS += test_failure(cmd, "Can't specify a different shard, must be") | |
956 | ||
957 | os.unlink(OTHERFILE) | |
958 | ||
959 | # Prep a valid export file for import failure tests | |
960 | OTHERFILE = "/tmp/foo.{pid}".format(pid=pid) | |
961 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE) | |
962 | logging.debug(cmd) | |
963 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
964 | ||
965 | # On import can't specify a PG with a non-existent pool | |
966 | cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE) | |
967 | ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be") | |
968 | ||
969 | # On import can't specify shard for a replicated export | |
970 | cmd = (CFSD_PREFIX + "--op import --pgid {pg}s0 --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE) | |
971 | ERRORS += test_failure(cmd, "Can't specify a sharded pgid with a non-sharded export") | |
972 | ||
973 | # On import can't specify a PG with a bad seed | |
974 | TMPPG="{pool}.80".format(pool=REPID) | |
975 | cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE) | |
976 | ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num") | |
977 | ||
978 | os.unlink(OTHERFILE) | |
979 | cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE) | |
980 | ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE)) | |
981 | ||
982 | cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN) | |
983 | ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory") | |
984 | ||
985 | cmd = "{path}/ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal".format(path=CEPH_BIN) | |
986 | ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory") | |
987 | ||
988 | # On import can't use stdin from a terminal | |
989 | cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG) | |
990 | ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True) | |
991 | ||
992 | # On import can't use stdin from a terminal | |
993 | cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG) | |
994 | ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True) | |
995 | ||
996 | # Specify a bad --type | |
997 | os.mkdir(OSDDIR + "/fakeosd") | |
998 | cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN) | |
999 | ERRORS += test_failure(cmd, "Unable to create store of type foobar") | |
1000 | ||
1001 | # Don't specify a data-path | |
1002 | cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN) | |
1003 | ERRORS += test_failure(cmd, "Must provide --data-path") | |
1004 | ||
1005 | cmd = (CFSD_PREFIX + "--op remove").format(osd=ONEOSD) | |
1006 | ERRORS += test_failure(cmd, "Must provide pgid") | |
1007 | ||
1008 | # Don't specify an --op or object command | |
1009 | cmd = CFSD_PREFIX.format(osd=ONEOSD) | |
1010 | ERRORS += test_failure(cmd, "Must provide --op or object command...") | |
1011 | ||
1012 | # Specify a bad --op command | |
1013 | cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD) | |
1014 | ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, export, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)") | |
1015 | ||
1016 | # Provide just the object param not a command | |
1017 | cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD) | |
1018 | ERRORS += test_failure(cmd, "Invalid syntax, missing command") | |
1019 | ||
1020 | # Provide an object name that doesn't exist | |
1021 | cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD) | |
1022 | ERRORS += test_failure(cmd, "No object id 'NON_OBJECT' found") | |
1023 | ||
1024 | # Provide an invalid object command | |
1025 | cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG) | |
1026 | ERRORS += test_failure(cmd, "Unknown object command 'notacommand'") | |
1027 | ||
1028 | cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1029 | ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified") | |
1030 | ||
1031 | cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1032 | ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array") | |
1033 | ||
1034 | cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1035 | ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements") | |
1036 | ||
1037 | cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1038 | ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements") | |
1039 | ||
1040 | cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1041 | ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements") | |
1042 | ||
1043 | cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1044 | ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string") | |
1045 | ||
1046 | cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG) | |
1047 | ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4") | |
1048 | ||
1049 | TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid) | |
1050 | ALLPGS = OBJREPPGS + OBJECPGS | |
1051 | OSDS = get_osds(ALLPGS[0], OSDDIR) | |
1052 | osd = OSDS[0] | |
1053 | ||
1054 | print("Test all --op dump-journal") | |
1055 | ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0] | |
1056 | ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS) | |
1057 | ||
1058 | # Test --op list and generate json for all objects | |
1059 | print("Test --op list variants") | |
1060 | ||
1061 | # retrieve all objects from all PGs | |
1062 | tmpfd = open(TMPFILE, "wb") | |
1063 | cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd) | |
1064 | logging.debug(cmd) | |
1065 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1066 | if ret != 0: | |
1067 | logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) | |
1068 | ERRORS += 1 | |
1069 | tmpfd.close() | |
1070 | lines = get_lines(TMPFILE) | |
1071 | JSONOBJ = sorted(set(lines)) | |
1072 | (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0] | |
1073 | ||
1074 | # retrieve all objects in a given PG | |
1075 | tmpfd = open(OTHERFILE, "ab") | |
1076 | cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid) | |
1077 | logging.debug(cmd) | |
1078 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1079 | if ret != 0: | |
1080 | logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) | |
1081 | ERRORS += 1 | |
1082 | tmpfd.close() | |
1083 | lines = get_lines(OTHERFILE) | |
1084 | JSONOBJ = sorted(set(lines)) | |
1085 | (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0] | |
1086 | ||
1087 | if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll: | |
1088 | logging.error("the first line of --op list is different " | |
1089 | "from the first line of --op list --pgid {pg}".format(pg=pgid)) | |
1090 | ERRORS += 1 | |
1091 | ||
1092 | # retrieve all objects with a given name in a given PG | |
1093 | tmpfd = open(OTHERFILE, "wb") | |
1094 | cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid']) | |
1095 | logging.debug(cmd) | |
1096 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1097 | if ret != 0: | |
1098 | logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) | |
1099 | ERRORS += 1 | |
1100 | tmpfd.close() | |
1101 | lines = get_lines(OTHERFILE) | |
1102 | JSONOBJ = sorted(set(lines)) | |
1103 | (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0] | |
1104 | ||
1105 | if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll: | |
1106 | logging.error("the first line of --op list is different " | |
1107 | "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid'])) | |
1108 | ERRORS += 1 | |
1109 | ||
1110 | print("Test --op list by generating json for all objects using default format") | |
1111 | for pg in ALLPGS: | |
1112 | OSDS = get_osds(pg, OSDDIR) | |
1113 | for osd in OSDS: | |
1114 | tmpfd = open(TMPFILE, "ab") | |
1115 | cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg) | |
1116 | logging.debug(cmd) | |
1117 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1118 | if ret != 0: | |
1119 | logging.error("Bad exit status {ret} from --op list request".format(ret=ret)) | |
1120 | ERRORS += 1 | |
1121 | ||
1122 | tmpfd.close() | |
1123 | lines = get_lines(TMPFILE) | |
1124 | JSONOBJ = sorted(set(lines)) | |
1125 | for JSON in JSONOBJ: | |
1126 | (pgid, jsondict) = json.loads(JSON) | |
1127 | # Skip clones for now | |
1128 | if jsondict['snapid'] != -2: | |
1129 | continue | |
1130 | db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict)) | |
1131 | # print db[jsondict['namespace']][jsondict['oid']]['json'] | |
1132 | if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict: | |
1133 | logging.error("Malformed JSON {json}".format(json=JSON)) | |
1134 | ERRORS += 1 | |
1135 | ||
1136 | # Test get-bytes | |
1137 | print("Test get-bytes and set-bytes") | |
1138 | for nspace in db.keys(): | |
1139 | for basename in db[nspace].keys(): | |
1140 | file = os.path.join(DATADIR, nspace + "-" + basename + "__head") | |
1141 | JSON = db[nspace][basename]['json'] | |
1142 | GETNAME = "/tmp/getbytes.{pid}".format(pid=pid) | |
1143 | TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid) | |
1144 | SETNAME = "/tmp/setbytes.{pid}".format(pid=pid) | |
1145 | BADNAME = "/tmp/badbytes.{pid}".format(pid=pid) | |
1146 | for pg in OBJREPPGS: | |
1147 | OSDS = get_osds(pg, OSDDIR) | |
1148 | for osd in OSDS: | |
1149 | DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) | |
1150 | fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) | |
1151 | and f.split("_")[0] == basename and f.split("_")[4] == nspace] | |
1152 | if not fnames: | |
1153 | continue | |
1154 | try: | |
1155 | os.unlink(GETNAME) | |
1156 | except: | |
1157 | pass | |
1158 | cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME) | |
1159 | logging.debug(cmd) | |
1160 | ret = call(cmd, shell=True) | |
1161 | if ret != 0: | |
1162 | logging.error("Bad exit status {ret}".format(ret=ret)) | |
1163 | ERRORS += 1 | |
1164 | continue | |
1165 | cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME) | |
1166 | ret = call(cmd, shell=True) | |
1167 | if ret != 0: | |
1168 | logging.error("Data from get-bytes differ") | |
1169 | logging.debug("Got:") | |
1170 | cat_file(logging.DEBUG, GETNAME) | |
1171 | logging.debug("Expected:") | |
1172 | cat_file(logging.DEBUG, file) | |
1173 | ERRORS += 1 | |
1174 | fd = open(SETNAME, "w") | |
1175 | data = "put-bytes going into {file}\n".format(file=file) | |
1176 | fd.write(data) | |
1177 | fd.close() | |
1178 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME) | |
1179 | logging.debug(cmd) | |
1180 | ret = call(cmd, shell=True) | |
1181 | if ret != 0: | |
1182 | logging.error("Bad exit status {ret} from set-bytes".format(ret=ret)) | |
1183 | ERRORS += 1 | |
1184 | fd = open(TESTNAME, "wb") | |
1185 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON) | |
1186 | logging.debug(cmd) | |
1187 | ret = call(cmd, shell=True, stdout=fd) | |
1188 | fd.close() | |
1189 | if ret != 0: | |
1190 | logging.error("Bad exit status {ret} from get-bytes".format(ret=ret)) | |
1191 | ERRORS += 1 | |
1192 | cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME) | |
1193 | logging.debug(cmd) | |
1194 | ret = call(cmd, shell=True) | |
1195 | if ret != 0: | |
1196 | logging.error("Data after set-bytes differ") | |
1197 | logging.debug("Got:") | |
1198 | cat_file(logging.DEBUG, TESTNAME) | |
1199 | logging.debug("Expected:") | |
1200 | cat_file(logging.DEBUG, SETNAME) | |
1201 | ERRORS += 1 | |
1202 | ||
1203 | # Use set-bytes with --dry-run and make sure contents haven't changed | |
1204 | fd = open(BADNAME, "w") | |
1205 | data = "Bad data for --dry-run in {file}\n".format(file=file) | |
1206 | fd.write(data) | |
1207 | fd.close() | |
1208 | cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME) | |
1209 | logging.debug(cmd) | |
1210 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1211 | if ret != 0: | |
1212 | logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret)) | |
1213 | ERRORS += 1 | |
1214 | fd = open(TESTNAME, "wb") | |
1215 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON) | |
1216 | logging.debug(cmd) | |
1217 | ret = call(cmd, shell=True, stdout=fd) | |
1218 | fd.close() | |
1219 | if ret != 0: | |
1220 | logging.error("Bad exit status {ret} from get-bytes".format(ret=ret)) | |
1221 | ERRORS += 1 | |
1222 | cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME) | |
1223 | logging.debug(cmd) | |
1224 | ret = call(cmd, shell=True) | |
1225 | if ret != 0: | |
1226 | logging.error("Data after set-bytes --dry-run changed!") | |
1227 | logging.debug("Got:") | |
1228 | cat_file(logging.DEBUG, TESTNAME) | |
1229 | logging.debug("Expected:") | |
1230 | cat_file(logging.DEBUG, SETNAME) | |
1231 | ERRORS += 1 | |
1232 | ||
1233 | fd = open(file, "rb") | |
1234 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON) | |
1235 | logging.debug(cmd) | |
1236 | ret = call(cmd, shell=True, stdin=fd) | |
1237 | if ret != 0: | |
1238 | logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret)) | |
1239 | ERRORS += 1 | |
1240 | fd.close() | |
1241 | ||
1242 | try: | |
1243 | os.unlink(GETNAME) | |
1244 | except: | |
1245 | pass | |
1246 | try: | |
1247 | os.unlink(TESTNAME) | |
1248 | except: | |
1249 | pass | |
1250 | try: | |
1251 | os.unlink(SETNAME) | |
1252 | except: | |
1253 | pass | |
1254 | try: | |
1255 | os.unlink(BADNAME) | |
1256 | except: | |
1257 | pass | |
1258 | ||
1259 | # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap | |
1260 | print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap") | |
1261 | for nspace in db.keys(): | |
1262 | for basename in db[nspace].keys(): | |
1263 | file = os.path.join(DATADIR, nspace + "-" + basename + "__head") | |
1264 | JSON = db[nspace][basename]['json'] | |
1265 | for pg in OBJREPPGS: | |
1266 | OSDS = get_osds(pg, OSDDIR) | |
1267 | for osd in OSDS: | |
1268 | DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) | |
1269 | fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) | |
1270 | and f.split("_")[0] == basename and f.split("_")[4] == nspace] | |
1271 | if not fnames: | |
1272 | continue | |
1273 | for key, val in db[nspace][basename]["xattr"].items(): | |
1274 | attrkey = "_" + key | |
1275 | cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey) | |
1276 | logging.debug(cmd) | |
1277 | getval = check_output(cmd, shell=True) | |
1278 | if getval != val: | |
1279 | logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val)) | |
1280 | ERRORS += 1 | |
1281 | continue | |
1282 | # set-attr to bogus value "foobar" | |
1283 | cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1284 | logging.debug(cmd) | |
1285 | ret = call(cmd, shell=True) | |
1286 | if ret != 0: | |
1287 | logging.error("Bad exit status {ret} from set-attr".format(ret=ret)) | |
1288 | ERRORS += 1 | |
1289 | continue | |
1290 | # Test set-attr with dry-run | |
1291 | cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1292 | logging.debug(cmd) | |
1293 | ret = call(cmd, shell=True, stdout=nullfd) | |
1294 | if ret != 0: | |
1295 | logging.error("Bad exit status {ret} from set-attr".format(ret=ret)) | |
1296 | ERRORS += 1 | |
1297 | continue | |
1298 | # Check the set-attr | |
1299 | cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1300 | logging.debug(cmd) | |
1301 | try: | |
1302 | getval = check_output(cmd, shell=True) | |
1303 | except subprocess.CalledProcessError as e: | |
1304 | logging.error("Bad exit status {ret} from get-attr".format(ret=e.returncode)) | |
1305 | ERRORS += 1 | |
| continue | |
1306 | if getval != "foobar": | |
1307 | logging.error("Check of set-attr failed because we got {val}".format(val=getval)) | |
1308 | ERRORS += 1 | |
1309 | continue | |
1310 | # Test rm-attr | |
1311 | cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1312 | logging.debug(cmd) | |
1313 | ret = call(cmd, shell=True) | |
1314 | if ret != 0: | |
1315 | logging.error("Bad exit status {ret} from rm-attr".format(ret=ret)) | |
1316 | ERRORS += 1 | |
1317 | continue | |
1318 | # Check rm-attr with dry-run | |
1319 | cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1320 | logging.debug(cmd) | |
1321 | ret = call(cmd, shell=True, stdout=nullfd) | |
1322 | if ret != 0: | |
1323 | logging.error("Bad exit status {ret} from rm-attr".format(ret=ret)) | |
1324 | ERRORS += 1 | |
1325 | continue | |
1326 | cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) | |
1327 | logging.debug(cmd) | |
1328 | ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd) | |
1329 | if ret == 0: | |
1330 | logging.error("For rm-attr expect get-attr to fail, but it succeeded") | |
1331 | ERRORS += 1 | |
1332 | # Put back value | |
1333 | cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val) | |
1334 | logging.debug(cmd) | |
1335 | ret = call(cmd, shell=True) | |
1336 | if ret != 0: | |
1337 | logging.error("Bad exit status {ret} from set-attr".format(ret=ret)) | |
1338 | ERRORS += 1 | |
1339 | continue | |
1340 | ||
1341 | hdr = db[nspace][basename].get("omapheader", "") | |
1342 | cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON) | |
1343 | logging.debug(cmd) | |
1344 | gethdr = check_output(cmd, shell=True) | |
1345 | if gethdr != hdr: | |
1346 | logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr)) | |
1347 | ERRORS += 1 | |
1348 | continue | |
1349 | # set-omaphdr to bogus value "foobar" | |
1350 | cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON) | |
1351 | logging.debug(cmd) | |
1352 | ret = call(cmd, shell=True) | |
1353 | if ret != 0: | |
1354 | logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) | |
1355 | ERRORS += 1 | |
1356 | continue | |
1357 | # Check the set-omaphdr | |
1358 | cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON) | |
1359 | logging.debug(cmd) | |
1360 | try: | |
1361 | gethdr = check_output(cmd, shell=True) | |
1362 | except subprocess.CalledProcessError as e: | |
1363 | logging.error("Bad exit status {ret} from get-omaphdr".format(ret=e.returncode)) | |
1364 | ERRORS += 1 | |
| continue | |
1365 | if gethdr != "foobar": | |
1366 | logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval)) | |
1367 | ERRORS += 1 | |
1368 | continue | |
1369 | # Test dry-run with set-omaphdr | |
1370 | cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON) | |
1371 | logging.debug(cmd) | |
1372 | ret = call(cmd, shell=True, stdout=nullfd) | |
1373 | if ret != 0: | |
1374 | logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) | |
1375 | ERRORS += 1 | |
1376 | continue | |
1377 | # Put back value | |
1378 | cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr) | |
1379 | logging.debug(cmd) | |
1380 | ret = call(cmd, shell=True) | |
1381 | if ret != 0: | |
1382 | logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) | |
1383 | ERRORS += 1 | |
1384 | continue | |
1385 | ||
1386 | for omapkey, val in db[nspace][basename]["omap"].items(): | |
1387 | cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey) | |
1388 | logging.debug(cmd) | |
1389 | getval = check_output(cmd, shell=True) | |
1390 | if getval != val: | |
1391 | logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val)) | |
1392 | ERRORS += 1 | |
1393 | continue | |
1394 | # set-omap to bogus value "foobar" | |
1395 | cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1396 | logging.debug(cmd) | |
1397 | ret = call(cmd, shell=True) | |
1398 | if ret != 0: | |
1399 | logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) | |
1400 | ERRORS += 1 | |
1401 | continue | |
1402 | # Check set-omap with dry-run | |
1403 | cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1404 | logging.debug(cmd) | |
1405 | ret = call(cmd, shell=True, stdout=nullfd) | |
1406 | if ret != 0: | |
1407 | logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) | |
1408 | ERRORS += 1 | |
1409 | continue | |
1410 | # Check the set-omap | |
1411 | cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1412 | logging.debug(cmd) | |
1413 | try: | |
1414 | getval = check_output(cmd, shell=True) | |
1415 | except subprocess.CalledProcessError as e: | |
1416 | logging.error("Bad exit status {ret} from get-omap".format(ret=e.returncode)) | |
1417 | ERRORS += 1 | |
| continue | |
1418 | if getval != "foobar": | |
1419 | logging.error("Check of set-omap failed because we got {val}".format(val=getval)) | |
1420 | ERRORS += 1 | |
1421 | continue | |
1422 | # Test rm-omap | |
1423 | cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1424 | logging.debug(cmd) | |
1425 | ret = call(cmd, shell=True) | |
1426 | if ret != 0: | |
1427 | logging.error("Bad exit status {ret} from rm-omap".format(ret=ret)) | |
1428 | ERRORS += 1 | |
1429 | # Check rm-omap with dry-run | |
1430 | cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1431 | logging.debug(cmd) | |
1432 | ret = call(cmd, shell=True, stdout=nullfd) | |
1433 | if ret != 0: | |
1434 | logging.error("Bad exit status {ret} from rm-omap".format(ret=ret)) | |
1435 | ERRORS += 1 | |
1436 | cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) | |
1437 | logging.debug(cmd) | |
1438 | ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd) | |
1439 | if ret == 0: | |
1440 | logging.error("For rm-omap expect get-omap to fail, but it succeeded") | |
1441 | ERRORS += 1 | |
1442 | # Put back value | |
1443 | cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val) | |
1444 | logging.debug(cmd) | |
1445 | ret = call(cmd, shell=True) | |
1446 | if ret != 0: | |
1447 | logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) | |
1448 | ERRORS += 1 | |
1449 | continue | |
1450 | ||
1451 | # Test dump | |
1452 | print("Test dump") | |
1453 | for nspace in db.keys(): | |
1454 | for basename in db[nspace].keys(): | |
1455 | file = os.path.join(DATADIR, nspace + "-" + basename + "__head") | |
1456 | JSON = db[nspace][basename]['json'] | |
1457 | GETNAME = "/tmp/getbytes.{pid}".format(pid=pid) | |
1458 | for pg in OBJREPPGS: | |
1459 | OSDS = get_osds(pg, OSDDIR) | |
1460 | for osd in OSDS: | |
1461 | DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) | |
1462 | fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) | |
1463 | and f.split("_")[0] == basename and f.split("_")[4] == nspace] | |
1464 | if not fnames: | |
1465 | continue | |
1466 | if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS): | |
1467 | continue | |
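| # Only the first NUM_CLONED_REP_OBJECTS objects were given a clone earlier | |
| # in the test, so only their dump output is expected to show "snap": 1. | |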
1468 | cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON) | |
1469 | logging.debug(cmd) | |
1470 | ret = call(cmd, shell=True) | |
1471 | if ret != 0: | |
1472 | logging.error("Invalid dump for {json}".format(json=JSON)) | |
1473 | ERRORS += 1 | |
1474 | ||
1475 | print("Test list-attrs get-attr") | |
1476 | ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid) | |
1477 | VALFILE = r"/tmp/val.{pid}".format(pid=pid) | |
1478 | for nspace in db.keys(): | |
1479 | for basename in db[nspace].keys(): | |
1480 | file = os.path.join(DATADIR, nspace + "-" + basename) | |
1481 | JSON = db[nspace][basename]['json'] | |
1482 | jsondict = json.loads(JSON) | |
1483 | ||
1484 | if 'shard_id' in jsondict: | |
1485 | logging.debug("ECobject " + JSON) | |
1486 | found = 0 | |
1487 | for pg in OBJECPGS: | |
1488 | OSDS = get_osds(pg, OSDDIR) | |
1489 | # Fix shard_id since we only have one json instance for each object | |
1490 | jsondict['shard_id'] = int(pg.split('s')[1]) | |
1491 | JSON = json.dumps(jsondict) | |
1492 | for osd in OSDS: | |
1493 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr hinfo_key").format(osd=osd, pg=pg, json=JSON) | |
1494 | logging.debug("TRY: " + cmd) | |
1495 | try: | |
1496 | out = check_output(cmd, shell=True, stderr=subprocess.STDOUT) | |
1497 | logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out)) | |
1498 | found += 1 | |
1499 | except subprocess.CalledProcessError as e: | |
1500 | if "No such file or directory" not in e.output and "No data available" not in e.output: | |
1501 | raise | |
1502 | # Assuming k=2 m=1 for the default ec pool | |
1503 | if found != 3: | |
1504 | logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found)) | |
1505 | ERRORS += 1 | |
1506 | ||
1507 | for pg in ALLPGS: | |
1508 | # Make sure rep obj with rep pg or ec obj with ec pg | |
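| # (EC pg ids carry a shard suffix such as "2.1s0", so an "s" in the pgid | |
| # marks an erasure-coded pg.) | |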
1509 | if ('shard_id' in jsondict) != (pg.find('s') > 0): | |
1510 | continue | |
1511 | if 'shard_id' in jsondict: | |
1512 | # Fix shard_id since we only have one json instance for each object | |
1513 | jsondict['shard_id'] = int(pg.split('s')[1]) | |
1514 | JSON = json.dumps(jsondict) | |
1515 | OSDS = get_osds(pg, OSDDIR) | |
1516 | for osd in OSDS: | |
1517 | DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) | |
1518 | fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) | |
1519 | and f.split("_")[0] == basename and f.split("_")[4] == nspace] | |
1520 | if not fnames: | |
1521 | continue | |
1522 | afd = open(ATTRFILE, "wb") | |
1523 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' list-attrs").format(osd=osd, pg=pg, json=JSON) | |
1524 | logging.debug(cmd) | |
1525 | ret = call(cmd, shell=True, stdout=afd) | |
1526 | afd.close() | |
1527 | if ret != 0: | |
1528 | logging.error("list-attrs failed with {ret}".format(ret=ret)) | |
1529 | ERRORS += 1 | |
1530 | continue | |
1531 | keys = get_lines(ATTRFILE) | |
1532 | values = dict(db[nspace][basename]["xattr"]) | |
1533 | for key in keys: | |
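| # "_" (object_info), "snapset" and "hinfo_key" are internal attrs; user | |
| # xattrs are stored with a leading "_" prefix, which is stripped below | |
| # before comparing against the expected values. | |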
1534 | if key == "_" or key == "snapset" or key == "hinfo_key": | |
1535 | continue | |
1536 | key = key.strip("_") | |
1537 | if key not in values: | |
1538 | logging.error("Unexpected key {key} present".format(key=key)) | |
1539 | ERRORS += 1 | |
1540 | continue | |
1541 | exp = values.pop(key) | |
1542 | vfd = open(VALFILE, "wb") | |
1543 | cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key="_" + key) | |
1544 | logging.debug(cmd) | |
1545 | ret = call(cmd, shell=True, stdout=vfd) | |
1546 | vfd.close() | |
1547 | if ret != 0: | |
1548 | logging.error("get-attr failed with {ret}".format(ret=ret)) | |
1549 | ERRORS += 1 | |
1550 | continue | |
1551 | lines = get_lines(VALFILE) | |
1552 | val = lines[0] | |
1553 | if exp != val: | |
1554 | logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp)) | |
1555 | ERRORS += 1 | |
1556 | if len(values) != 0: | |
1557 | logging.error("Not all keys found, remaining keys: {keys}".format(keys=values)) | |
1558 | ERRORS += 1 | |
1559 | ||
1560 | print("Test --op meta-list") | |
1561 | tmpfd = open(TMPFILE, "wb") | |
1562 | cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD) | |
1563 | logging.debug(cmd) | |
1564 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1565 | if ret != 0: | |
1566 | logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret)) | |
1567 | ERRORS += 1 | |
1568 | ||
1569 | print("Test get-bytes on meta") | |
1570 | tmpfd.close() | |
1571 | lines = get_lines(TMPFILE) | |
1572 | JSONOBJ = sorted(set(lines)) | |
1573 | for JSON in JSONOBJ: | |
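| # Each meta-list line is a JSON pair [pgid, object]; for the meta | |
| # collection the pgid is the literal string "meta", e.g. (illustrative | |
| # only, exact fields vary by version): | |
| #   ["meta",{"oid":"osdmap.1", ...}] | |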
1574 | (pgid, jsondict) = json.loads(JSON) | |
1575 | if pgid != "meta": | |
1576 | logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid)) | |
1577 | ERRORS += 1 | |
1578 | if jsondict['namespace'] != "": | |
1579 | logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace'])) | |
1580 | ERRORS += 1 | |
1581 | logging.info(JSON) | |
1582 | try: | |
1583 | os.unlink(GETNAME) | |
1584 | except: | |
1585 | pass | |
1586 | cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME) | |
1587 | logging.debug(cmd) | |
1588 | ret = call(cmd, shell=True) | |
1589 | if ret != 0: | |
1590 | logging.error("Bad exit status {ret}".format(ret=ret)) | |
1591 | ERRORS += 1 | |
1592 | ||
1593 | try: | |
1594 | os.unlink(GETNAME) | |
1595 | except: | |
1596 | pass | |
1597 | try: | |
1598 | os.unlink(TESTNAME) | |
1599 | except: | |
1600 | pass | |
1601 | ||
1602 | print("Test pg info") | |
1603 | for pg in ALLREPPGS + ALLECPGS: | |
1604 | for osd in get_osds(pg, OSDDIR): | |
1605 | cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg) | |
1606 | logging.debug(cmd) | |
1607 | ret = call(cmd, shell=True, stdout=nullfd) | |
1608 | if ret != 0: | |
1609 | logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1610 | ERRORS += 1 | |
1611 | ||
1612 | print("Test pg logging") | |
1613 | if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS): | |
1614 | logging.warning("All PGs have objects, so there is no PG log without modify entries to check") | |
1615 | for pg in ALLREPPGS + ALLECPGS: | |
1616 | for osd in get_osds(pg, OSDDIR): | |
1617 | tmpfd = open(TMPFILE, "wb") | |
1618 | cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg) | |
1619 | logging.debug(cmd) | |
1620 | ret = call(cmd, shell=True, stdout=tmpfd) | |
1621 | if ret != 0: | |
1622 | logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1623 | ERRORS += 1 | |
1624 | HASOBJ = pg in OBJREPPGS + OBJECPGS | |
1625 | MODOBJ = False | |
1626 | for line in get_lines(TMPFILE): | |
1627 | if line.find("modify") != -1: | |
1628 | MODOBJ = True | |
1629 | break | |
1630 | if HASOBJ != MODOBJ: | |
1631 | logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd)) | |
1632 | MSG = "" if HASOBJ else "NOT " | |
1633 | print("Log should {msg}have a modify entry".format(msg=MSG)) | |
1634 | ERRORS += 1 | |
1635 | ||
1636 | try: | |
1637 | os.unlink(TMPFILE) | |
1638 | except: | |
1639 | pass | |
1640 | ||
1641 | print("Test list-pgs") | |
1642 | for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: | |
1643 | ||
1644 | CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None) | |
1645 | CHECK_PGS = sorted(CHECK_PGS) | |
1646 | ||
1647 | cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd) | |
1648 | logging.debug(cmd) | |
1649 | TEST_PGS = check_output(cmd, shell=True).split("\n") | |
1650 | TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line | |
1651 | ||
1652 | if TEST_PGS != CHECK_PGS: | |
1653 | logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd)) | |
1654 | logging.error("Expected {pgs}".format(pgs=CHECK_PGS)) | |
1655 | logging.error("Got {pgs}".format(pgs=TEST_PGS)) | |
1656 | ERRORS += 1 | |
1657 | ||
1658 | EXP_ERRORS = 0 | |
1659 | print("Test pg export --dry-run") | |
1660 | pg = ALLREPPGS[0] | |
1661 | osd = get_osds(pg, OSDDIR)[0] | |
1662 | fname = "/tmp/fname.{pid}".format(pid=pid) | |
1663 | cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) | |
1664 | logging.debug(cmd) | |
1665 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1666 | if ret != 0: | |
1667 | logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1668 | EXP_ERRORS += 1 | |
1669 | elif os.path.exists(fname): | |
1670 | logging.error("Exporting --dry-run created file") | |
1671 | EXP_ERRORS += 1 | |
1672 | ||
1673 | cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname) | |
1674 | logging.debug(cmd) | |
1675 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1676 | if ret != 0: | |
1677 | logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1678 | EXP_ERRORS += 1 | |
1679 | else: | |
1680 | outdata = get_lines(fname) | |
1681 | if len(outdata) > 0: | |
1682 | logging.error("Exporting --dry-run to stdout not empty") | |
1683 | logging.error("Data: " + outdata) | |
1684 | EXP_ERRORS += 1 | |
1685 | ||
1686 | os.mkdir(TESTDIR) | |
1687 | for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: | |
1688 | os.mkdir(os.path.join(TESTDIR, osd)) | |
1689 | print("Test pg export") | |
1690 | for pg in ALLREPPGS + ALLECPGS: | |
1691 | for osd in get_osds(pg, OSDDIR): | |
1692 | mydir = os.path.join(TESTDIR, osd) | |
1693 | fname = os.path.join(mydir, pg) | |
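| # Exercise all three ways of writing the export stream: shell redirection | |
| # of stdout (first PG), "--file -" meaning stdout (second PG), and an | |
| # explicit "--file <path>" (all remaining PGs). | |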
1694 | if pg == ALLREPPGS[0]: | |
1695 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname) | |
1696 | elif pg == ALLREPPGS[1]: | |
1697 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname) | |
1698 | else: | |
1699 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) | |
1700 | logging.debug(cmd) | |
1701 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1702 | if ret != 0: | |
1703 | logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1704 | EXP_ERRORS += 1 | |
1705 | ||
1706 | ERRORS += EXP_ERRORS | |
1707 | ||
1708 | print("Test pg removal") | |
1709 | RM_ERRORS = 0 | |
1710 | for pg in ALLREPPGS + ALLECPGS: | |
1711 | for osd in get_osds(pg, OSDDIR): | |
1712 | # This should do nothing | |
1713 | cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd) | |
1714 | logging.debug(cmd) | |
1715 | ret = call(cmd, shell=True, stdout=nullfd) | |
1716 | if ret != 0: | |
1717 | logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1718 | RM_ERRORS += 1 | |
1719 | cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd) | |
1720 | logging.debug(cmd) | |
1721 | ret = call(cmd, shell=True, stdout=nullfd) | |
1722 | if ret != 0: | |
1723 | logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1724 | RM_ERRORS += 1 | |
1725 | ||
1726 | ERRORS += RM_ERRORS | |
1727 | ||
1728 | IMP_ERRORS = 0 | |
1729 | if EXP_ERRORS == 0 and RM_ERRORS == 0: | |
1730 | print("Test pg import") | |
1731 | for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: | |
1732 | dir = os.path.join(TESTDIR, osd) | |
1733 | PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] | |
1734 | for pg in PGS: | |
1735 | file = os.path.join(dir, pg) | |
1736 | # This should do nothing | |
1737 | cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file) | |
1738 | logging.debug(cmd) | |
1739 | ret = call(cmd, shell=True, stdout=nullfd) | |
1740 | if ret != 0: | |
1741 | logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) | |
1742 | IMP_ERRORS += 1 | |
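| # Exercise all three ways of feeding the import stream: piping the dump | |
| # into stdin (first PG), "--file -" with shell input redirection (second | |
| # PG), and an explicit "--file <path>" (all remaining PGs). | |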
1743 | if pg == PGS[0]: | |
1744 | cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd) | |
1745 | elif pg == PGS[1]: | |
1746 | cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg) | |
1747 | else: | |
1748 | cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file) | |
1749 | logging.debug(cmd) | |
1750 | ret = call(cmd, shell=True, stdout=nullfd) | |
1751 | if ret != 0: | |
1752 | logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) | |
1753 | IMP_ERRORS += 1 | |
1754 | else: | |
1755 | logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES") | |
1756 | ||
1757 | ERRORS += IMP_ERRORS | |
1758 | logging.debug(cmd) | |
1759 | ||
1760 | if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: | |
1761 | print("Verify replicated import data") | |
1762 | data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME) | |
1763 | ERRORS += data_errors | |
1764 | else: | |
1765 | logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES") | |
1766 | ||
1767 | print("Test all --op dump-journal again") | |
1768 | ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0] | |
1769 | ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS) | |
1770 | ||
1771 | vstart(new=False) | |
1772 | wait_for_health() | |
1773 | ||
1774 | if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: | |
1775 | print("Verify erasure coded import data") | |
1776 | ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db) | |
1777 | # Check replicated data/xattr/omap using rados | |
1778 | print("Verify replicated import data using rados") | |
1779 | ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db) | |
1780 | ||
1781 | if EXP_ERRORS == 0: | |
1782 | NEWPOOL = "rados-import-pool" | |
1783 | cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN) | |
1784 | logging.debug(cmd) | |
1785 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1786 | ||
1787 | print("Test rados import") | |
1788 | first = True | |
1789 | for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: | |
1790 | dir = os.path.join(TESTDIR, osd) | |
1791 | for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]: | |
1792 | if pg.find("{id}.".format(id=REPID)) != 0: | |
1793 | continue | |
1794 | file = os.path.join(dir, pg) | |
1795 | if first: | |
1796 | first = False | |
1797 | # This should do nothing | |
1798 | cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) | |
1799 | logging.debug(cmd) | |
1800 | ret = call(cmd, shell=True, stdout=nullfd) | |
1801 | if ret != 0: | |
1802 | logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret)) | |
1803 | ERRORS += 1 | |
1804 | cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN) | |
1805 | logging.debug(cmd) | |
1806 | data = check_output(cmd, shell=True) | |
1807 | if data: | |
1808 | logging.error("'{data}'".format(data=data)) | |
1809 | logging.error("Found objects after dry-run") | |
1810 | ERRORS += 1 | |
1811 | cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) | |
1812 | logging.debug(cmd) | |
1813 | ret = call(cmd, shell=True, stdout=nullfd) | |
1814 | if ret != 0: | |
1815 | logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret)) | |
1816 | ERRORS += 1 | |
1817 | cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) | |
1818 | logging.debug(cmd) | |
1819 | ret = call(cmd, shell=True, stdout=nullfd) | |
1820 | if ret != 0: | |
1821 | logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret)) | |
1822 | ERRORS += 1 | |
1823 | ||
1824 | ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db) | |
1825 | else: | |
1826 | logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES") | |
1827 | ||
1828 | # Clear directories of previous portion | |
1829 | call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True) | |
1830 | call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True) | |
1831 | os.mkdir(TESTDIR) | |
1832 | os.mkdir(DATADIR) | |
1833 | ||
1834 | # Cause SPLIT_POOL to split and test import with object/log filtering | |
1835 | print("Testing import all objects after a split") | |
1836 | SPLIT_POOL = "split_pool" | |
1837 | PG_COUNT = 1 | |
1838 | SPLIT_OBJ_COUNT = 5 | |
1839 | SPLIT_NSPACE_COUNT = 2 | |
1840 | SPLIT_NAME = "split" | |
1841 | cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN) | |
1842 | logging.debug(cmd) | |
1843 | call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1844 | SPLITID = get_pool_id(SPLIT_POOL, nullfd) | |
1845 | pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1]) | |
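| # "ceph osd pool get <pool> size" prints a line like "size: 3"; the second | |
| # field is the replication factor, used later to check the replica count. | |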
1846 | EXP_ERRORS = 0 | |
1847 | RM_ERRORS = 0 | |
1848 | IMP_ERRORS = 0 | |
1849 | ||
1850 | objects = range(1, SPLIT_OBJ_COUNT + 1) | |
1851 | nspaces = range(SPLIT_NSPACE_COUNT) | |
1852 | for n in nspaces: | |
1853 | nspace = get_nspace(n) | |
1854 | ||
1855 | for i in objects: | |
1856 | NAME = SPLIT_NAME + "{num}".format(num=i) | |
1857 | LNAME = nspace + "-" + NAME | |
1858 | DDNAME = os.path.join(DATADIR, LNAME) | |
1859 | DDNAME += "__head" | |
1860 | ||
1861 | cmd = "rm -f " + DDNAME | |
1862 | logging.debug(cmd) | |
1863 | call(cmd, shell=True) | |
1864 | ||
1865 | if i == 1: | |
1866 | dataline = range(DATALINECOUNT) | |
1867 | else: | |
1868 | dataline = range(1) | |
1869 | fd = open(DDNAME, "w") | |
1870 | data = "This is the split data for " + LNAME + "\n" | |
1871 | for _ in dataline: | |
1872 | fd.write(data) | |
1873 | fd.close() | |
1874 | ||
1875 | cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) | |
1876 | logging.debug(cmd) | |
1877 | ret = call(cmd, shell=True, stderr=nullfd) | |
1878 | if ret != 0: | |
1879 | logging.critical("Rados put command failed with {ret}".format(ret=ret)) | |
1880 | return 1 | |
1881 | ||
1882 | wait_for_health() | |
1883 | kill_daemons() | |
1884 | ||
1885 | for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: | |
1886 | os.mkdir(os.path.join(TESTDIR, osd)) | |
1887 | ||
1888 | pg = "{pool}.0".format(pool=SPLITID) | |
1889 | EXPORT_PG = pg | |
1890 | ||
1891 | export_osds = get_osds(pg, OSDDIR) | |
1892 | for osd in export_osds: | |
1893 | mydir = os.path.join(TESTDIR, osd) | |
1894 | fname = os.path.join(mydir, pg) | |
1895 | cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) | |
1896 | logging.debug(cmd) | |
1897 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
1898 | if ret != 0: | |
1899 | logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) | |
1900 | EXP_ERRORS += 1 | |
1901 | ||
1902 | ERRORS += EXP_ERRORS | |
1903 | ||
1904 | if EXP_ERRORS == 0: | |
1905 | vstart(new=False) | |
1906 | wait_for_health() | |
1907 | ||
1908 | cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN) | |
1909 | logging.debug(cmd) | |
1910 | ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) | |
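| # Brief pause so the pg_num increase can take effect before polling health. | |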
1911 | time.sleep(5) | |
1912 | wait_for_health() | |
1913 | ||
1914 | kill_daemons() | |
1915 | ||
1916 | # Now 2 PGs, poolid.0 and poolid.1 | |
1917 | for seed in range(2): | |
1918 | pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed) | |
1919 | ||
1920 | which = 0 | |
1921 | for osd in get_osds(pg, OSDDIR): | |
1922 | cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd) | |
1923 | logging.debug(cmd) | |
1924 | ret = call(cmd, shell=True, stdout=nullfd) | |
1925 | ||
1926 | # The export files were all taken from EXPORT_PG as it existed before the | |
1927 | # split, one copy per OSD that held it.  Cycle through those copies with | |
1928 | # 'which' so that every export copy gets exercised by an import. | |
1929 | mydir = os.path.join(TESTDIR, export_osds[which]) | |
1930 | fname = os.path.join(mydir, EXPORT_PG) | |
1931 | which += 1 | |
1932 | cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) | |
1933 | logging.debug(cmd) | |
1934 | ret = call(cmd, shell=True, stdout=nullfd) | |
1935 | if ret != 0: | |
1936 | logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) | |
1937 | IMP_ERRORS += 1 | |
1938 | ||
1939 | ERRORS += IMP_ERRORS | |
1940 | ||
1941 | # Start up again to make sure imports didn't corrupt anything | |
1942 | if IMP_ERRORS == 0: | |
1943 | print("Verify split import data") | |
1944 | data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME) | |
1945 | ERRORS += data_errors | |
1946 | if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size): | |
1947 | logging.error("Incorrect number of replicas seen {count}".format(count=count)) | |
1948 | ERRORS += 1 | |
1949 | vstart(new=False) | |
1950 | wait_for_health() | |
1951 | ||
1952 | call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True) | |
1953 | call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True) | |
1954 | ||
1955 | ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS) | |
1956 | ||
1957 | # vstart() starts 4 OSDs | |
1958 | ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS) | |
1959 | ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0]) | |
1960 | if ERRORS == 0: | |
1961 | print("TEST PASSED") | |
1962 | return 0 | |
1963 | else: | |
1964 | print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS)) | |
1965 | return 1 | |
1966 | ||
1967 | ||
1968 | def remove_btrfs_subvolumes(path): | |
1969 | if platform.system() == "FreeBSD": | |
1970 | return | |
1971 | result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE) | |
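| # "%%T" becomes "%T" after Python %-formatting; "stat -f -c %T" prints the | |
| # filesystem type name (e.g. "btrfs") for the given path. | |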
1972 | for line in result.stdout: | |
1973 | filesystem = decode(line).rstrip('\n') | |
1974 | if filesystem == "btrfs": | |
1975 | result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE) | |
1976 | for line in result.stdout: | |
1977 | subvolume = decode(line).split()[8] | |
1978 | # extracting the relative volume name | |
1979 | m = re.search(".*(%s.*)" % path, subvolume) | |
1980 | if m: | |
1981 | found = m.group(1) | |
1982 | call("sudo btrfs subvolume delete %s" % found, shell=True) | |
1983 | ||
1984 | ||
1985 | if __name__ == "__main__": | |
1986 | status = 1 | |
1987 | try: | |
1988 | status = main(sys.argv[1:]) | |
1989 | finally: | |
1990 | kill_daemons() | |
1991 | remove_btrfs_subvolumes(CEPH_DIR) | |
1992 | call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True) | |
1993 | sys.exit(status) |