1"""
2ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
3"""
9f95a23c
TL
4from io import BytesIO
5
7c673cae 6import contextlib
e306af50 7import json
7c673cae 8import logging
7c673cae 9import os
9f95a23c 10import six
7c673cae
FG
11import sys
12import tempfile
e306af50
TL
13import time
14from tasks import ceph_manager
15from tasks.util.rados import (rados, create_replicated_pool, create_ec_pool)
16from teuthology import misc as teuthology
17from teuthology.orchestra import run
9f95a23c
TL
18
19from teuthology.exceptions import CommandFailedError
20
7c673cae
FG
21# from util.rados import (rados, create_ec_pool,
22# create_replicated_pool,
23# create_cache_pool)
24
25log = logging.getLogger(__name__)
26
27# Should get cluster name "ceph" from somewhere
28# and normal path from osd_data and osd_journal in conf
29FSPATH = "/var/lib/ceph/osd/ceph-{id}"
30JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
31
32
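# Write NUM_OBJECTS reference files into DATADIR on the host running the
# task; each file repeats a single data line DATALINECOUNT times.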
def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
                         BASE_NAME, DATALINECOUNT):
    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        LOCALNAME = os.path.join(DATADIR, NAME)

        dataline = range(DATALINECOUNT)
        fd = open(LOCALNAME, "w")
        data = "This is the data for " + NAME + "\n"
        for _ in dataline:
            fd.write(data)
        fd.close()

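# Write the same reference files into DATADIR on a remote host, so that the
# rados puts and the later diff comparisons see identical content.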
def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                          BASE_NAME, DATALINECOUNT):

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        remote.run(args=['rm', '-f', DDNAME])

        dataline = range(DATALINECOUNT)
        data = "This is the data for " + NAME + "\n"
        DATA = ""
        for _ in dataline:
            DATA += data
        teuthology.write_file(remote, DDNAME, DATA)

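# Load the reference files into POOL with "rados put" and record every xattr,
# omap header and omap key set on each object in the db dict; the
# objectstore-tool checks below compare against this record.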
def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
              BASE_NAME, DATALINECOUNT, POOL, db, ec):
    ERRORS = 0
    log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
                     wait=False)
        # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
        ret = proc.wait()
        if ret != 0:
            log.critical("Rados put failed with status {ret}".
                         format(ret=proc.exitstatus))
            sys.exit(1)

        db[NAME] = {}

        keys = range(i)
        db[NAME]["xattr"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "key{i}-{k}".format(i=i, k=k)
            myval = "val{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.error("setxattr failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["xattr"][mykey] = myval

        # Erasure coded pools don't support omap
        if ec:
            continue

        # Create omap header in all objects but REPobject1
        if i != 1:
            myhdr = "hdr{i}".format(i=i)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
                                    NAME, myhdr])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapheader failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omapheader"] = myhdr

        db[NAME]["omap"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "okey{i}-{k}".format(i=i, k=k)
            myval = "oval{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapval failed with {ret}".format(ret=ret))
            db[NAME]["omap"][mykey] = myval

    return ERRORS

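# Read a file into a list of its non-empty lines, then delete the file.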
def get_lines(filename):
    tmpfd = open(filename, "r")
    line = True
    lines = []
    while line:
        line = tmpfd.readline().rstrip('\n')
        if line:
            lines += [line]
    tmpfd.close()
    os.unlink(filename)
    return lines

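# A minimal teuthology fragment exercising this task might look like the
# sketch below (the surrounding install/ceph tasks depend on the suite):
#
#   tasks:
#   - install:
#   - ceph:
#   - ceph_objectstore_tool:
#       objects: 20
#       pgnum: 12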
@contextlib.contextmanager
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

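    # Wait until every OSD is up and in, then set noout/nodown so that the
    # OSDs killed later for offline ceph-objectstore-tool access are not
    # marked out or down by the monitors.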
    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) !=
           len(manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) !=
           len(manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

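    # Run the full battery of objectstore checks first against a replicated
    # pool and then against an erasure coded pool, accumulating errors.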
    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote,
                               EC_POOL, EC_NAME, ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')

def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
    manager = ctx.managers['ceph']

    osds = ctx.cluster.only(teuthology.is_type('osd'))

    TEUTHDIR = teuthology.get_testdir(ctx)
    DATADIR = os.path.join(TEUTHDIR, "ceph.data")
    DATALINECOUNT = 10000
    ERRORS = 0
    NUM_OBJECTS = config.get('objects', 10)
    log.info("objects: {num}".format(num=NUM_OBJECTS))

    pool_dump = manager.get_pool_dump(REP_POOL)
    REPID = pool_dump['pool']

    log.debug("repid={num}".format(num=REPID))

    db = {}

    LOCALDIR = tempfile.mkdtemp("cod")

    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
                         REP_NAME, DATALINECOUNT)
    allremote = []
    allremote.append(cli_remote)
    allremote += list(osds.remotes.keys())
    allremote = list(set(allremote))
    for remote in allremote:
        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                              REP_NAME, DATALINECOUNT)

    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
                        REP_NAME, DATALINECOUNT, REP_POOL, db, ec)

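    # Map each OSD id to the PGs it holds for this pool, based on the acting
    # set; for erasure coded pools the per-shard form "<pgid>s<shard>" is used.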
    pgs = {}
    for stats in manager.get_pg_stats():
        if stats["pgid"].find(str(REPID) + ".") != 0:
            continue
        if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append(stats["pgid"])
        elif pool_dump["type"] == ceph_manager.PoolType.ERASURE_CODED:
            shard = 0
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append("{pgid}s{shard}".
                                               format(pgid=stats["pgid"],
                                                      shard=shard))
                shard += 1
        else:
            raise Exception("{pool} has an unexpected type {type}".
                            format(pool=REP_POOL, type=pool_dump["type"]))

    log.info(pgs)
    log.info(db)

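    # ceph-objectstore-tool needs exclusive, offline access to the object
    # store, so stop every OSD before running the checks.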
    for osd in manager.get_osd_status()['up']:
        manager.kill_osd(osd)
    time.sleep(5)

    pgswithobjects = set()
    objsinpg = {}

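    # Each line of "--op list" output is a JSON pair [pgid, object-info];
    # remember which PGs hold test objects and keep the JSON for later ops.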
    # Test --op list and generate json for all objects
    log.info("Test --op list by generating json for all objects")
    prefix = ("sudo ceph-objectstore-tool "
              "--data-path {fpath} "
              "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
    for remote in osds.remotes.keys():
        log.debug(remote)
        log.debug(osds.remotes[remote])
        for role in osds.remotes[remote]:
            if not role.startswith("osd."):
                continue
            osdid = int(role.split('.')[1])
            log.info("process osd.{id} on {remote}".
                     format(id=osdid, remote=remote))
            cmd = (prefix + "--op list").format(id=osdid)
            try:
                lines = remote.sh(cmd, check_status=False).splitlines()
                for pgline in lines:
                    if not pgline:
                        continue
                    (pg, obj) = json.loads(pgline)
                    name = obj['oid']
                    if name in db:
                        pgswithobjects.add(pg)
                        objsinpg.setdefault(pg, []).append(name)
                        db[name].setdefault("pg2json",
                                            {})[pg] = json.dumps(obj)
            except CommandFailedError as e:
                log.error("Bad exit status {ret} from --op list request".
                          format(ret=e.exitstatus))
                ERRORS += 1

    log.info(db)
    log.info(pgswithobjects)
    log.info(objsinpg)

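    # get-bytes/set-bytes checks only run against the replicated pool; on an
    # erasure coded pool each OSD stores an encoded shard rather than the
    # whole object, so a byte-for-byte comparison does not apply.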
    if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
        # Test get-bytes
        log.info("Test get-bytes and set-bytes")
        for basename in db.keys():
            file = os.path.join(DATADIR, basename)
            GETNAME = os.path.join(DATADIR, "get")
            SETNAME = os.path.join(DATADIR, "set")

            for remote in osds.remotes.keys():
                for role in osds.remotes[remote]:
                    if not role.startswith("osd."):
                        continue
                    osdid = int(role.split('.')[1])
                    if osdid not in pgs:
                        continue

                    for pg, JSON in db[basename]["pg2json"].items():
                        if pg in pgs[osdid]:
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("get-bytes {fname}".
                                    format(fname=GETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            if proc.exitstatus != 0:
                                remote.run(args="rm -f {getfile}".
                                           format(getfile=GETNAME).split())
                                log.error("Bad exit status {ret}".
                                          format(ret=proc.exitstatus))
                                ERRORS += 1
                                continue
                            cmd = ("diff -q {file} {getfile}".
                                   format(file=file, getfile=GETNAME))
                            proc = remote.run(args=cmd.split())
                            if proc.exitstatus != 0:
                                log.error("Data from get-bytes differ")
                                # log.debug("Got:")
                                # cat_file(logging.DEBUG, GETNAME)
                                # log.debug("Expected:")
                                # cat_file(logging.DEBUG, file)
                                ERRORS += 1
                            remote.run(args="rm -f {getfile}".
                                       format(getfile=GETNAME).split())

                            data = ("put-bytes going into {file}\n".
                                    format(file=file))
                            teuthology.write_file(remote, SETNAME, data)
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=SETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += "get-bytes -".split()
                            try:
                                output = remote.sh(cmd, wait=True)
                                if data != output:
                                    log.error("Data inconsistent after "
                                              "set-bytes, got:")
                                    log.error(output)
                                    ERRORS += 1
                            except CommandFailedError as e:
                                log.error("get-bytes after "
                                          "set-bytes ret={ret}".
                                          format(ret=e.exitstatus))
                                ERRORS += 1

                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=file).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

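    # list-attrs/get-attr run for both pool types: user xattrs show up with a
    # leading underscore, and EC shards additionally carry hinfo_key.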
409 log.info("Test list-attrs get-attr")
410 for basename in db.keys():
411 file = os.path.join(DATADIR, basename)
412 GETNAME = os.path.join(DATADIR, "get")
413 SETNAME = os.path.join(DATADIR, "set")
414
9f95a23c 415 for remote in osds.remotes.keys():
7c673cae 416 for role in osds.remotes[remote]:
e306af50 417 if not role.startswith("osd."):
7c673cae
FG
418 continue
419 osdid = int(role.split('.')[1])
420 if osdid not in pgs:
421 continue
422
9f95a23c 423 for pg, JSON in db[basename]["pg2json"].items():
7c673cae
FG
424 if pg in pgs[osdid]:
425 cmd = ((prefix + "--pgid {pg}").
426 format(id=osdid, pg=pg).split())
427 cmd.append(run.Raw("'{json}'".format(json=JSON)))
428 cmd += ["list-attrs"]
9f95a23c
TL
429 try:
430 keys = remote.sh(cmd, wait=True, stderr=BytesIO()).split()
431 except CommandFailedError as e:
7c673cae 432 log.error("Bad exit status {ret}".
9f95a23c 433 format(ret=e.exitstatus))
7c673cae
FG
434 ERRORS += 1
435 continue
7c673cae
FG
436 values = dict(db[basename]["xattr"])
437
438 for key in keys:
439 if (key == "_" or
440 key == "snapset" or
441 key == "hinfo_key"):
442 continue
443 key = key.strip("_")
444 if key not in values:
445 log.error("The key {key} should be present".
446 format(key=key))
447 ERRORS += 1
448 continue
449 exp = values.pop(key)
450 cmd = ((prefix + "--pgid {pg}").
451 format(id=osdid, pg=pg).split())
452 cmd.append(run.Raw("'{json}'".format(json=JSON)))
453 cmd += ("get-attr {key}".
454 format(key="_" + key).split())
9f95a23c
TL
455 try:
456 val = remote.sh(cmd, wait=True)
457 except CommandFailedError as e:
7c673cae 458 log.error("get-attr failed with {ret}".
9f95a23c 459 format(ret=e.exitstatus))
7c673cae
FG
460 ERRORS += 1
461 continue
7c673cae
FG
462 if exp != val:
463 log.error("For key {key} got value {got} "
464 "instead of {expected}".
465 format(key=key, got=val,
466 expected=exp))
467 ERRORS += 1
468 if "hinfo_key" in keys:
469 cmd_prefix = prefix.format(id=osdid)
470 cmd = """
471 expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
472 echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
473 test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
474 echo $expected | base64 --decode | \
475 {prefix} --pgid {pg} '{json}' set-attr {key} -
476 test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
477 """.format(prefix=cmd_prefix, pg=pg, json=JSON,
478 key="hinfo_key")
479 log.debug(cmd)
480 proc = remote.run(args=['bash', '-e', '-x',
481 '-c', cmd],
482 check_status=False,
9f95a23c
TL
483 stdout=BytesIO(),
484 stderr=BytesIO())
7c673cae
FG
485 proc.wait()
486 if proc.exitstatus != 0:
487 log.error("failed with " +
488 str(proc.exitstatus))
9f95a23c
TL
489 log.error(" ".join([
490 six.ensure_str(proc.stdout.getvalue()),
491 six.ensure_str(proc.stderr.getvalue()),
492 ]))
7c673cae
FG
493 ERRORS += 1
494
495 if len(values) != 0:
496 log.error("Not all keys found, remaining keys:")
497 log.error(values)
498
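    # "--op info" dumps the PG info structure; as a sanity check the output
    # should at least mention the pgid itself.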
499 log.info("Test pg info")
9f95a23c 500 for remote in osds.remotes.keys():
7c673cae 501 for role in osds.remotes[remote]:
e306af50 502 if not role.startswith("osd."):
7c673cae
FG
503 continue
504 osdid = int(role.split('.')[1])
505 if osdid not in pgs:
506 continue
507
508 for pg in pgs[osdid]:
509 cmd = ((prefix + "--op info --pgid {pg}").
510 format(id=osdid, pg=pg).split())
9f95a23c
TL
511 try:
512 info = remote.sh(cmd, wait=True)
513 except CommandFailedError as e:
7c673cae 514 log.error("Failure of --op info command with {ret}".
9f95a23c 515 format(e.exitstatus))
7c673cae
FG
516 ERRORS += 1
517 continue
7c673cae
FG
518 if not str(pg) in info:
519 log.error("Bad data from info: {info}".format(info=info))
520 ERRORS += 1
521
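    # "--op log" should contain "modify" entries exactly for the PGs that
    # hold test objects.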
522 log.info("Test pg logging")
9f95a23c 523 for remote in osds.remotes.keys():
7c673cae 524 for role in osds.remotes[remote]:
e306af50 525 if not role.startswith("osd."):
7c673cae
FG
526 continue
527 osdid = int(role.split('.')[1])
528 if osdid not in pgs:
529 continue
530
531 for pg in pgs[osdid]:
532 cmd = ((prefix + "--op log --pgid {pg}").
533 format(id=osdid, pg=pg).split())
9f95a23c
TL
534 try:
535 output = remote.sh(cmd, wait=True)
536 except CommandFailedError as e:
7c673cae
FG
537 log.error("Getting log failed for pg {pg} "
538 "from osd.{id} with {ret}".
9f95a23c 539 format(pg=pg, id=osdid, ret=e.exitstatus))
7c673cae
FG
540 ERRORS += 1
541 continue
542 HASOBJ = pg in pgswithobjects
9f95a23c 543 MODOBJ = "modify" in output
7c673cae
FG
544 if HASOBJ != MODOBJ:
545 log.error("Bad log for pg {pg} from osd.{id}".
546 format(pg=pg, id=osdid))
547 MSG = (HASOBJ and [""] or ["NOT "])[0]
548 log.error("Log should {msg}have a modify entry".
549 format(msg=MSG))
550 ERRORS += 1
551
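    # Export every PG to a file under DATADIR; these dumps are imported back
    # after the removal test below.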
552 log.info("Test pg export")
553 EXP_ERRORS = 0
9f95a23c 554 for remote in osds.remotes.keys():
7c673cae 555 for role in osds.remotes[remote]:
e306af50 556 if not role.startswith("osd."):
7c673cae
FG
557 continue
558 osdid = int(role.split('.')[1])
559 if osdid not in pgs:
560 continue
561
562 for pg in pgs[osdid]:
563 fpath = os.path.join(DATADIR, "osd{id}.{pg}".
564 format(id=osdid, pg=pg))
565
566 cmd = ((prefix + "--op export --pgid {pg} --file {file}").
567 format(id=osdid, pg=pg, file=fpath))
9f95a23c
TL
568 try:
569 remote.sh(cmd, wait=True)
570 except CommandFailedError as e:
7c673cae
FG
571 log.error("Exporting failed for pg {pg} "
572 "on osd.{id} with {ret}".
9f95a23c 573 format(pg=pg, id=osdid, ret=e.exitstatus))
7c673cae
FG
574 EXP_ERRORS += 1
575
576 ERRORS += EXP_ERRORS
577
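    # Remove every PG from its OSD so the exported dumps can be imported back.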
578 log.info("Test pg removal")
579 RM_ERRORS = 0
9f95a23c 580 for remote in osds.remotes.keys():
7c673cae 581 for role in osds.remotes[remote]:
e306af50 582 if not role.startswith("osd."):
7c673cae
FG
583 continue
584 osdid = int(role.split('.')[1])
585 if osdid not in pgs:
586 continue
587
588 for pg in pgs[osdid]:
3efd9988 589 cmd = ((prefix + "--force --op remove --pgid {pg}").
7c673cae 590 format(pg=pg, id=osdid))
9f95a23c
TL
591 try:
592 remote.sh(cmd, wait=True)
593 except CommandFailedError as e:
7c673cae
FG
594 log.error("Removing failed for pg {pg} "
595 "on osd.{id} with {ret}".
9f95a23c 596 format(pg=pg, id=osdid, ret=e.exitstatus))
7c673cae
FG
597 RM_ERRORS += 1
598
599 ERRORS += RM_ERRORS
600
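    # Only attempt imports if export and removal were clean; otherwise the
    # on-disk state is unknown and import results would not be meaningful.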
    IMP_ERRORS = 0
    if EXP_ERRORS == 0 and RM_ERRORS == 0:
        log.info("Test pg import")

        for remote in osds.remotes.keys():
            for role in osds.remotes[remote]:
                if not role.startswith("osd."):
                    continue
                osdid = int(role.split('.')[1])
                if osdid not in pgs:
                    continue

                for pg in pgs[osdid]:
                    fpath = os.path.join(DATADIR, "osd{id}.{pg}".
                                         format(id=osdid, pg=pg))

                    cmd = ((prefix + "--op import --file {file}").
                           format(id=osdid, file=fpath))
                    try:
                        remote.sh(cmd, wait=True)
                    except CommandFailedError as e:
                        log.error("Import failed from {file} with {ret}".
                                  format(file=fpath, ret=e.exitstatus))
                        IMP_ERRORS += 1
    else:
        log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")

    ERRORS += IMP_ERRORS

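    # With everything re-imported, restart the OSDs and read each object back
    # through rados, comparing it to its reference file.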
    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        log.info("Restarting OSDs....")
        # They still look to be up because of setting nodown
        for osd in manager.get_osd_status()['up']:
            manager.revive_osd(osd)
        # Wait for health?
        time.sleep(5)
        # Let scrub after test runs verify consistency of all copies
        log.info("Verify replicated import data")
        objects = range(1, NUM_OBJECTS + 1)
        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            TESTNAME = os.path.join(DATADIR, "gettest")
            REFNAME = os.path.join(DATADIR, NAME)

            proc = rados(ctx, cli_remote,
                         ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)

            ret = proc.wait()
            if ret != 0:
                log.error("After import, rados get failed with {ret}".
                          format(ret=proc.exitstatus))
                ERRORS += 1
                continue

            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
                                                   ref=REFNAME)
            proc = cli_remote.run(args=cmd, check_status=False)
            proc.wait()
            if proc.exitstatus != 0:
                log.error("Data comparison failed for {obj}".format(obj=NAME))
                ERRORS += 1

    return ERRORS