# Source: ceph.git (git.proxmox.com mirror) -- ceph/qa/tasks/mds_creation_failure.py
# FIXME: this file has many undefined vars which are accessed!

import contextlib
import logging

from tasks import ceph_manager
from teuthology import misc
from teuthology.orchestra.run import CommandFailedError, Raw

log = logging.getLogger(__name__)
13 @contextlib.contextmanager
14 def task(ctx
, config
):
16 Go through filesystem creation with a synthetic failure in an MDS
17 in its 'up:creating' state, to exercise the retry behaviour.
19 # Grab handles to the teuthology objects of interest
20 mdslist
= list(misc
.all_roles_of_type(ctx
.cluster
, 'mds'))
22 # Require exactly one MDS, the code path for creation failure when
23 # a standby is available is different
24 raise RuntimeError("This task requires exactly one MDS")
27 (mds_remote
,) = ctx
.cluster
.only('mds.{_id}'.format(_id
=mds_id
)).remotes
.keys()
28 manager
= ceph_manager
.CephManager(
29 mds_remote
, ctx
=ctx
, logger
=log
.getChild('ceph_manager'),
33 self
.fs
.set_max_mds(0)
34 self
.fs
.mds_stop(mds_id
)
35 self
.fs
.mds_fail(mds_id
)
37 # Reset the filesystem so that next start will go into CREATING
38 manager
.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
39 manager
.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")
41 # Start the MDS with mds_kill_create_at set, it will crash during creation
42 mds
.restart_with_args(["--mds_kill_create_at=1"])
45 except CommandFailedError
as e
:
47 log
.info("MDS creation killed as expected")
49 log
.error("Unexpected status code %s" % e
.exitstatus
)
52 # Since I have intentionally caused a crash, I will clean up the resulting core
53 # file to avoid task.internal.coredump seeing it as a failure.
54 log
.info("Removing core file from synthetic MDS failure")
55 mds_remote
.run(args
=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive
=misc
.get_archive_dir(ctx
)))])
57 # It should have left the MDS map state still in CREATING
58 status
= self
.fs
.status().get_mds(mds_id
)
59 assert status
['state'] == 'up:creating'
61 # Start the MDS again without the kill flag set, it should proceed with creation successfully
64 # Wait for state ACTIVE
65 self
.fs
.wait_for_state("up:active", timeout
=120, mds_id
=mds_id
)
67 # The system should be back up in a happy healthy state, go ahead and run any further tasks
68 # inside this context.