from StringIO import StringIO
import json
import logging
import datetime
import errno
import os
import random
import re
import time

from gevent import Greenlet

from teuthology.exceptions import CommandFailedError
from teuthology import misc
from teuthology.nuke import clear_firewall
from teuthology.parallel import parallel
from tasks.ceph_manager import write_conf
from tasks import ceph_manager

log = logging.getLogger(__name__)


DAEMON_WAIT_TIMEOUT = 120

class ObjectNotFound(Exception):
    def __init__(self, object_name):
        self._object_name = object_name

    def __str__(self):
        return "Object not found: '{0}'".format(self._object_name)

class FSStatus(object):
    """
    Operations on a snapshot of the FSMap.
    """
    def __init__(self, mon_manager):
        self.mon = mon_manager
        self.map = json.loads(self.mon.raw_cluster_cmd("fs", "dump", "--format=json"))

    def __str__(self):
        return json.dumps(self.map, indent=2, sort_keys=True)

    # Expose the fsmap for manual inspection.
    def __getitem__(self, key):
        """
        Get a field from the fsmap.
        """
        return self.map[key]

    def get_filesystems(self):
        """
        Iterator for all filesystems.
        """
        for fs in self.map['filesystems']:
            yield fs

    def get_all(self):
        """
        Iterator for all the mds_info components in the FSMap.
        """
        for info in self.get_standbys():
            yield info
        for fs in self.map['filesystems']:
            for info in fs['mdsmap']['info'].values():
                yield info

    def get_standbys(self):
        """
        Iterator for all standbys.
        """
        for info in self.map['standbys']:
            yield info

    def get_fsmap(self, fscid):
        """
        Get the fsmap for the given FSCID.
        """
        for fs in self.map['filesystems']:
            if fscid is None or fs['id'] == fscid:
                return fs
        raise RuntimeError("FSCID {0} not in map".format(fscid))

    def get_fsmap_byname(self, name):
        """
        Get the fsmap for the given file system name.
        """
        for fs in self.map['filesystems']:
            if name is None or fs['mdsmap']['fs_name'] == name:
                return fs
        raise RuntimeError("FS {0} not in map".format(name))

    def get_replays(self, fscid):
        """
        Get the standby:replay MDS for the given FSCID.
        """
        fs = self.get_fsmap(fscid)
        for info in fs['mdsmap']['info'].values():
            if info['state'] == 'up:standby-replay':
                yield info

    def get_ranks(self, fscid):
        """
        Get the ranks for the given FSCID.
        """
        fs = self.get_fsmap(fscid)
        for info in fs['mdsmap']['info'].values():
            if info['rank'] >= 0:
                yield info

    def get_rank(self, fscid, rank):
        """
        Get the rank for the given FSCID.
        """
        for info in self.get_ranks(fscid):
            if info['rank'] == rank:
                return info
        raise RuntimeError("FSCID {0} has no rank {1}".format(fscid, rank))

    def get_mds(self, name):
        """
        Get the info for the given MDS name.
        """
        for info in self.get_all():
            if info['name'] == name:
                return info
        return None

    def get_mds_addr(self, name):
        """
        Return the instance addr as a string, like "10.214.133.138:6807\/10825"
        """
        info = self.get_mds(name)
        if info:
            return info['addr']
        else:
            log.warn(json.dumps(list(self.get_all()), indent=2))  # dump for debugging
            raise RuntimeError("MDS id '{0}' not found in map".format(name))

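# Usage sketch (hypothetical; assumes a connected CephManager instance):
#
#   status = FSStatus(mon_manager)
#   for info in status.get_ranks(fscid):
#       log.info("rank {0} is {1}".format(info['rank'], info['state']))
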
class CephCluster(object):
    @property
    def admin_remote(self):
        first_mon = misc.get_first_mon(self._ctx, None)
        (result,) = self._ctx.cluster.only(first_mon).remotes.iterkeys()
        return result

    def __init__(self, ctx):
        self._ctx = ctx
        self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))

    def get_config(self, key, service_type=None):
        """
        Get config from mon by default, or a specific service if caller asks for it
        """
        if service_type is None:
            service_type = 'mon'

        service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0]
        return self.json_asok(['config', 'get', key], service_type, service_id)[key]

    def set_ceph_conf(self, subsys, key, value):
        if subsys not in self._ctx.ceph['ceph'].conf:
            self._ctx.ceph['ceph'].conf[subsys] = {}
        self._ctx.ceph['ceph'].conf[subsys][key] = value
        write_conf(self._ctx)  # XXX because we don't have the ceph task's config object, if they
                               # used a different config path this won't work.

    def clear_ceph_conf(self, subsys, key):
        del self._ctx.ceph['ceph'].conf[subsys][key]
        write_conf(self._ctx)

    def json_asok(self, command, service_type, service_id):
        proc = self.mon_manager.admin_socket(service_type, service_id, command)
        response_data = proc.stdout.getvalue()
        log.info("_json_asok output: {0}".format(response_data))
        if response_data.strip():
            return json.loads(response_data)
        else:
            return None

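# Usage sketch (hypothetical names; the daemon's admin socket must be up):
#
#   cluster = CephCluster(ctx)
#   mon_version = cluster.json_asok(['version'], 'mon', 'a')
#   cache_size = cluster.get_config('mds_cache_size', service_type='mds')
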
class MDSCluster(CephCluster):
    """
    Collective operations on all the MDS daemons in the Ceph cluster.  These
    daemons may be in use by various Filesystems.

    For the benefit of pre-multi-filesystem tests, this class is also
    a parent of Filesystem.  The correct way to use MDSCluster going forward is
    as a separate instance outside of your (multiple) Filesystem instances.
    """
    def __init__(self, ctx):
        super(MDSCluster, self).__init__(ctx)

        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))

        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        if hasattr(self._ctx, "daemons"):
            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id))
                                     for mds_id in self.mds_ids])

    def _one_or_all(self, mds_id, cb, in_parallel=True):
        """
        Call a callback for a single named MDS, or for all.

        Note that the parallelism here isn't for performance, it's to avoid being overly kind
        to the cluster by waiting a graceful ssh-latency of time between doing things, and to
        avoid being overly kind by executing them in a particular order.  However, some actions
        don't cope with being done in parallel, so it's optional (`in_parallel`)

        :param mds_id: MDS daemon name, or None
        :param cb: Callback taking single argument of MDS daemon name
        :param in_parallel: whether to invoke callbacks concurrently (else one after the other)
        """
        if mds_id is None:
            if in_parallel:
                with parallel() as p:
                    for mds_id in self.mds_ids:
                        p.spawn(cb, mds_id)
            else:
                for mds_id in self.mds_ids:
                    cb(mds_id)
        else:
            cb(mds_id)

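    # Usage sketch: operate on one daemon or on every daemon, e.g.
    #   self._one_or_all('a', lambda id_: self.mds_daemons[id_].stop())   # just mds.a
    #   self._one_or_all(None, lambda id_: self.mds_daemons[id_].stop())  # all, in parallel
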
    def get_config(self, key, service_type=None):
        """
        get_config specialization of service_type="mds"
        """
        if service_type != "mds":
            return super(MDSCluster, self).get_config(key, service_type)

        # Some tests stop MDS daemons, don't send commands to a dead one:
        service_id = random.sample(filter(lambda i: self.mds_daemons[i].running(), self.mds_daemons), 1)[0]
        return self.json_asok(['config', 'get', key], service_type, service_id)[key]

    def mds_stop(self, mds_id=None):
        """
        Stop the MDS daemon process(es).  If it held a rank, that rank
        will eventually go laggy.
        """
        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].stop())

    def mds_fail(self, mds_id=None):
        """
        Inform MDSMonitor of the death of the daemon process(es).  If it held
        a rank, that rank will be relinquished.
        """
        self._one_or_all(mds_id, lambda id_: self.mon_manager.raw_cluster_cmd("mds", "fail", id_))

    def mds_restart(self, mds_id=None):
        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].restart())

    def mds_fail_restart(self, mds_id=None):
        """
        Variation on restart that includes marking MDSs as failed, so that doing this
        operation followed by waiting for healthy daemon states guarantees that they
        have gone down and come up, rather than potentially seeing the healthy states
        that existed before the restart.
        """
        def _fail_restart(id_):
            self.mds_daemons[id_].stop()
            self.mon_manager.raw_cluster_cmd("mds", "fail", id_)
            self.mds_daemons[id_].restart()

        self._one_or_all(mds_id, _fail_restart)

    def newfs(self, name='cephfs', create=True):
        return Filesystem(self._ctx, name=name, create=create)

    def status(self):
        return FSStatus(self.mon_manager)

    def delete_all_filesystems(self):
        """
        Remove all filesystems that exist, and any pools in use by them.
        """
        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
        pool_id_name = {}
        for pool in pools:
            pool_id_name[pool['pool']] = pool['pool_name']

        # mark cluster down for each fs to prevent churn during deletion
        status = self.status()
        for fs in status.get_filesystems():
            self.mon_manager.raw_cluster_cmd("fs", "set", fs['mdsmap']['fs_name'], "cluster_down", "true")

        # get a new copy as actives may have since changed
        status = self.status()
        for fs in status.get_filesystems():
            mdsmap = fs['mdsmap']
            metadata_pool = pool_id_name[mdsmap['metadata_pool']]

            for gid in mdsmap['up'].values():
                self.mon_manager.raw_cluster_cmd('mds', 'fail', gid.__str__())

            self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                             metadata_pool, metadata_pool,
                                             '--yes-i-really-really-mean-it')
            for data_pool in mdsmap['data_pools']:
                data_pool = pool_id_name[data_pool]
                try:
                    self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                                     data_pool, data_pool,
                                                     '--yes-i-really-really-mean-it')
                except CommandFailedError as e:
                    if e.exitstatus == 16:  # EBUSY: the data pool is shared by
                        pass                # two filesystems; let the second
                    else:                   # pass delete it.
                        raise

    def get_standby_daemons(self):
        return set([s['name'] for s in self.status().get_standbys()])

    def get_mds_hostnames(self):
        result = set()
        for mds_id in self.mds_ids:
            mds_remote = self.mon_manager.find_remote('mds', mds_id)
            result.add(mds_remote.hostname)

        return list(result)

    def set_clients_block(self, blocked, mds_id=None):
        """
        Block (using iptables) client communications to this MDS.  Be careful: if
        other services are running on this MDS, or other MDSs try to talk to this
        MDS, their communications may also be blocked as collateral damage.

        :param mds_id: Optional ID of MDS to block, default to all
        :return:
        """
        da_flag = "-A" if blocked else "-D"

        def set_block(_mds_id):
            remote = self.mon_manager.find_remote('mds', _mds_id)
            status = self.status()

            addr = status.get_mds_addr(_mds_id)
            ip_str, port_str, inst_str = re.match("(.+):(.+)/(.+)", addr).groups()

            remote.run(
                args=["sudo", "iptables", da_flag, "OUTPUT", "-p", "tcp", "--sport", port_str, "-j", "REJECT", "-m",
                      "comment", "--comment", "teuthology"])
            remote.run(
                args=["sudo", "iptables", da_flag, "INPUT", "-p", "tcp", "--dport", port_str, "-j", "REJECT", "-m",
                      "comment", "--comment", "teuthology"])

        self._one_or_all(mds_id, set_block, in_parallel=False)

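    # Usage sketch (hypothetical daemon name): cut client traffic to mds.a,
    # exercise reconnect behaviour, then restore it:
    #
    #   mdscluster.set_clients_block(True, mds_id='a')
    #   ...
    #   mdscluster.set_clients_block(False, mds_id='a')
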
    def clear_firewall(self):
        clear_firewall(self._ctx)

    def get_mds_info(self, mds_id):
        return FSStatus(self.mon_manager).get_mds(mds_id)

    def is_pool_full(self, pool_name):
        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
        for pool in pools:
            if pool['pool_name'] == pool_name:
                return 'full' in pool['flags_names'].split(",")

        raise RuntimeError("Pool not found '{0}'".format(pool_name))

class Filesystem(MDSCluster):
    """
    This object is for driving a CephFS filesystem.  The MDS daemons driven by
    MDSCluster may be shared with other Filesystems.
    """
    def __init__(self, ctx, fscid=None, name=None, create=False,
                 ec_profile=None):
        super(Filesystem, self).__init__(ctx)

        self.name = name
        self.ec_profile = ec_profile
        self.id = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

        if name is not None:
            if fscid is not None:
                raise RuntimeError("cannot specify fscid when creating fs")
            if create and not self.legacy_configured():
                self.create()
        else:
            if fscid is not None:
                self.id = fscid
            self.getinfo(refresh=True)

        # Stash a reference to the first created filesystem on ctx, so
        # that if someone drops to the interactive shell they can easily
        # poke our methods.
        if not hasattr(self._ctx, "filesystem"):
            self._ctx.filesystem = self

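    # Usage sketch (hypothetical; run inside a teuthology task with a ctx):
    #
    #   fs = Filesystem(ctx, name='cephfs', create=True)
    #   fs.wait_for_daemons()
    #   log.info("data pool: {0}".format(fs.get_data_pool_name()))
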
    def getinfo(self, refresh=False):
        status = self.status()
        if self.id is not None:
            fsmap = status.get_fsmap(self.id)
        elif self.name is not None:
            fsmap = status.get_fsmap_byname(self.name)
        else:
            fss = [fs for fs in status.get_filesystems()]
            if len(fss) == 1:
                fsmap = fss[0]
            elif len(fss) == 0:
                raise RuntimeError("no file system available")
            else:
                raise RuntimeError("more than one file system available")
        self.id = fsmap['id']
        self.name = fsmap['mdsmap']['fs_name']
        self.get_pool_names(status=status, refresh=refresh)
        return status

    def set_metadata_overlay(self, overlay):
        if self.id is not None:
            raise RuntimeError("cannot specify fscid when configuring overlay")
        self.metadata_overlay = overlay

    def deactivate(self, rank):
        if rank < 0:
            raise RuntimeError("invalid rank")
        elif rank == 0:
            raise RuntimeError("cannot deactivate rank 0")
        self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank))

    def set_max_mds(self, max_mds):
        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)

    def set_allow_dirfrags(self, yes):
        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(),
                                         '--yes-i-really-mean-it')

    def get_pgs_per_fs_pool(self):
        """
        Calculate how many PGs to use when creating a pool, in order to avoid raising any
        health warnings about mon_pg_warn_min_per_osd

        :return: an integer number of PGs
        """
        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
        return pg_warn_min_per_osd * osd_count

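    # For example, assuming the historical default mon_pg_warn_min_per_osd of
    # 30, a 3-OSD test cluster yields 30 * 3 = 90 PGs per filesystem pool.
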
    def create(self):
        if self.name is None:
            self.name = "cephfs"
        if self.metadata_pool_name is None:
            self.metadata_pool_name = "{0}_metadata".format(self.name)
        if self.data_pool_name is None:
            data_pool_name = "{0}_data".format(self.name)
        else:
            data_pool_name = self.data_pool_name

        log.info("Creating filesystem '{0}'".format(self.name))

        pgs_per_fs_pool = self.get_pgs_per_fs_pool()

        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                         self.metadata_pool_name, pgs_per_fs_pool.__str__())
        if self.metadata_overlay:
            self.mon_manager.raw_cluster_cmd('fs', 'new',
                                             self.name, self.metadata_pool_name, data_pool_name,
                                             '--allow-dangerous-metadata-overlay')
        else:
            if self.ec_profile and 'disabled' not in self.ec_profile:
                log.info("EC profile is %s", self.ec_profile)
                cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                cmd.extend(self.ec_profile)
                self.mon_manager.raw_cluster_cmd(*cmd)
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'create',
                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
                    data_pool_name)
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'set',
                    data_pool_name, 'allow_ec_overwrites', 'true')
            else:
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'create',
                    data_pool_name, pgs_per_fs_pool.__str__())
            self.mon_manager.raw_cluster_cmd('fs', 'new',
                                             self.name, self.metadata_pool_name, data_pool_name)
        self.check_pool_application(self.metadata_pool_name)
        self.check_pool_application(data_pool_name)
        # Turn off spurious standby count warnings from modifying max_mds in tests.
        try:
            self.mon_manager.raw_cluster_cmd('fs', 'set', self.name, 'standby_count_wanted', '0')
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise)
                pass
            else:
                raise

        self.getinfo(refresh=True)

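    # Usage sketch (hypothetical profile values): passing an ec_profile at
    # construction time makes create() build an erasure-coded data pool:
    #
    #   fs = Filesystem(ctx, name='cephfs', create=True,
    #                   ec_profile=['k=2', 'm=1', 'crush-failure-domain=osd'])
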
    def check_pool_application(self, pool_name):
        osd_map = self.mon_manager.get_osd_dump_json()
        for pool in osd_map['pools']:
            if pool['pool_name'] == pool_name:
                if "application_metadata" in pool:
                    if "cephfs" not in pool['application_metadata']:
                        raise RuntimeError(
                            "Pool {0} does not name cephfs as application!".format(pool_name))

    def __del__(self):
        if getattr(self._ctx, "filesystem", None) == self:
            delattr(self._ctx, "filesystem")

    def exists(self):
        """
        Whether a filesystem exists in the mon's filesystem list
        """
        fs_list = json.loads(self.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty'))
        return self.name in [fs['name'] for fs in fs_list]

    def legacy_configured(self):
        """
        Check if a legacy (i.e. pre "fs new") filesystem configuration is present.  If this is
        the case, the caller should avoid using Filesystem.create
        """
        try:
            out_text = self.mon_manager.raw_cluster_cmd('--format=json-pretty', 'osd', 'lspools')
            pools = json.loads(out_text)
            metadata_pool_exists = 'metadata' in [p['poolname'] for p in pools]
            if metadata_pool_exists:
                self.metadata_pool_name = 'metadata'
        except CommandFailedError as e:
            # For use in upgrade tests, Ceph cuttlefish and earlier don't support
            # structured output (--format) from the CLI.
            if e.exitstatus == 22:
                metadata_pool_exists = True
            else:
                raise

        return metadata_pool_exists

    def _df(self):
        return json.loads(self.mon_manager.raw_cluster_cmd("df", "--format=json-pretty"))

    def get_mds_map(self):
        return self.status().get_fsmap(self.id)['mdsmap']

    def add_data_pool(self, name):
        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
        self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
        self.get_pool_names(refresh=True)
        for poolid, fs_name in self.data_pools.items():
            if name == fs_name:
                return poolid
        raise RuntimeError("could not get just created pool '{0}'".format(name))

    def get_pool_names(self, refresh=False, status=None):
        if refresh or self.metadata_pool_name is None or self.data_pools is None:
            if status is None:
                status = self.status()
            fsmap = status.get_fsmap(self.id)

            osd_map = self.mon_manager.get_osd_dump_json()
            id_to_name = {}
            for p in osd_map['pools']:
                id_to_name[p['pool']] = p['pool_name']

            self.metadata_pool_name = id_to_name[fsmap['mdsmap']['metadata_pool']]
            self.data_pools = {}
            for data_pool in fsmap['mdsmap']['data_pools']:
                self.data_pools[data_pool] = id_to_name[data_pool]

    def get_data_pool_name(self, refresh=False):
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh=True)
        assert(len(self.data_pools) == 1)
        return self.data_pools.values()[0]

    def get_data_pool_id(self, refresh=False):
        """
        Don't call this if you have multiple data pools
        :return: integer
        """
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh=True)
        assert(len(self.data_pools) == 1)
        return self.data_pools.keys()[0]

    def get_data_pool_names(self, refresh=False):
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh=True)
        return self.data_pools.values()

    def get_metadata_pool_name(self):
        return self.metadata_pool_name

    def set_data_pool_name(self, name):
        if self.id is not None:
            raise RuntimeError("can't set filesystem name if its fscid is set")
        self.data_pool_name = name

):
612 def get_pool_df(self
, pool_name
):
615 {u'bytes_used': 0, u'max_avail': 83848701, u'objects': 0, u'kb_used': 0}
617 for pool_df
in self
._df
()['pools']:
618 if pool_df
['name'] == pool_name
:
619 return pool_df
['stats']
621 raise RuntimeError("Pool name '{0}' not found".format(pool_name
))
624 return self
._df
()['stats']['total_used_bytes']
    def are_daemons_healthy(self):
        """
        Return true if all daemons are in one of active, standby, standby-replay, and
        at least max_mds daemons are in 'active'.

        Unlike most of Filesystem, this function is tolerant of new-style `fs`
        commands being missing, because we are part of the ceph installation
        process during upgrade suites, so must fall back to old style commands
        when we get an EINVAL on a new style command.

        :return:
        """

        active_count = 0
        try:
            mds_map = self.get_mds_map()
        except CommandFailedError as cfe:
            # Old version, fall back to non-multi-fs commands
            if cfe.exitstatus == errno.EINVAL:
                mds_map = json.loads(
                    self.mon_manager.raw_cluster_cmd('mds', 'dump', '--format=json'))
            else:
                raise

        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))

        for mds_id, mds_status in mds_map['info'].items():
            if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
                log.warning("Unhealthy mds state {0}:{1}".format(mds_id, mds_status['state']))
                return False
            elif mds_status['state'] == 'up:active':
                active_count += 1

        log.info("are_daemons_healthy: {0}/{1}".format(
            active_count, mds_map['max_mds']
        ))

        if active_count >= mds_map['max_mds']:
            # The MDSMap says these guys are active, but let's check they really are
            for mds_id, mds_status in mds_map['info'].items():
                if mds_status['state'] == 'up:active':
                    try:
                        daemon_status = self.mds_asok(["status"], mds_id=mds_status['name'])
                    except CommandFailedError as cfe:
                        if cfe.exitstatus == errno.EINVAL:
                            # Old version, can't do this check
                            continue
                        else:
                            # MDS not even running
                            return False

                    if daemon_status['state'] != 'up:active':
                        # MDS hasn't taken the latest map yet
                        return False

            return True
        else:
            return False

    def get_daemon_names(self, state=None):
        """
        Return MDS daemon names of those daemons in the given state
        :param state:
        :return:
        """
        status = self.get_mds_map()
        result = []
        for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
            if mds_status['state'] == state or state is None:
                result.append(mds_status['name'])

        return result

    def get_active_names(self):
        """
        Return MDS daemon names of those daemons holding ranks
        in state up:active

        :return: list of strings like ['a', 'b'], sorted by rank
        """
        return self.get_daemon_names("up:active")

    def get_all_mds_rank(self):
        status = self.get_mds_map()
        result = []
        for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                result.append(mds_status['rank'])

        return result

    def get_rank_names(self):
        """
        Return MDS daemon names of those daemons holding a rank,
        sorted by rank.  This includes e.g. up:replay/reconnect
        as well as active, but does not include standby or
        standby-replay.
        """
        status = self.get_mds_map()
        result = []
        for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                result.append(mds_status['name'])

        return result

    def wait_for_daemons(self, timeout=None):
        """
        Wait until all daemons are healthy
        :return:
        """

        if timeout is None:
            timeout = DAEMON_WAIT_TIMEOUT

        elapsed = 0
        while True:
            if self.are_daemons_healthy():
                return
            else:
                time.sleep(1)
                elapsed += 1

            if elapsed > timeout:
                raise RuntimeError("Timed out waiting for MDS daemons to become healthy")

    def get_lone_mds_id(self):
        """
        Get a single MDS ID: the only one if there is only one
        configured, else the only one currently holding a rank,
        else raise an error.
        """
        if len(self.mds_ids) != 1:
            alive = self.get_rank_names()
            if len(alive) == 1:
                return alive[0]
            else:
                raise ValueError("Explicit MDS argument required when multiple MDSs in use")
        else:
            return self.mds_ids[0]

    def recreate(self):
        log.info("Creating new filesystem")
        self.delete_all_filesystems()
        self.id = None
        self.create()

    def put_metadata_object_raw(self, object_id, infile):
        """
        Save an object to the metadata pool
        """
        temp_bin_path = infile
        self.client_remote.run(args=[
            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'put', object_id, temp_bin_path
        ])

    def get_metadata_object_raw(self, object_id):
        """
        Retrieve an object from the metadata pool and store it in a file.
        """
        temp_bin_path = '/tmp/' + object_id + '.bin'

        self.client_remote.run(args=[
            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
        ])

        return temp_bin_path

    def get_metadata_object(self, object_type, object_id):
        """
        Retrieve an object from the metadata pool, pass it through
        ceph-dencoder to dump it to JSON, and return the decoded object.
        """
        temp_bin_path = '/tmp/out.bin'

        self.client_remote.run(args=[
            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
        ])

        stdout = StringIO()
        self.client_remote.run(args=[
            'sudo', os.path.join(self._prefix, 'ceph-dencoder'), 'type', object_type, 'import', temp_bin_path,
            'decode', 'dump_json'
        ], stdout=stdout)
        dump_json = stdout.getvalue().strip()
        try:
            dump = json.loads(dump_json)
        except (TypeError, ValueError):
            log.error("Failed to decode JSON: '{0}'".format(dump_json))
            raise

        return dump

    def get_journal_version(self):
        """
        Read the JournalPointer and Journal::Header objects to learn the version of
        encoding in use.
        """
        journal_pointer_object = '400.00000000'
        journal_pointer_dump = self.get_metadata_object("JournalPointer", journal_pointer_object)
        journal_ino = journal_pointer_dump['journal_pointer']['front']

        journal_header_object = "{0:x}.00000000".format(journal_ino)
        journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)

        version = journal_header_dump['journal_header']['stream_format']
        log.info("Read journal version {0}".format(version))

        return version

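    # Note: '400.00000000' is rank 0's JournalPointer object (see
    # erase_mds_objects() below for the 0x100-per-rank naming scheme); the
    # journal header object name is the journal inode in hex, e.g. inode
    # 0x200 maps to "200.00000000".
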
    def mds_asok(self, command, mds_id=None):
        if mds_id is None:
            mds_id = self.get_lone_mds_id()

        return self.json_asok(command, 'mds', mds_id)

    def read_cache(self, path, depth=None):
        cmd = ["dump", "tree", path]
        if depth is not None:
            cmd.append(depth.__str__())
        result = self.mds_asok(cmd)
        if len(result) == 0:
            raise RuntimeError("Path not found in cache: {0}".format(path))

        return result

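    # Usage sketch (hypothetical path): dump the cached tree under /mydir to
    # depth 2 via the lone MDS's admin socket:
    #
    #   subtree = fs.read_cache("/mydir", depth=2)
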
    def wait_for_state(self, goal_state, reject=None, timeout=None, mds_id=None, rank=None):
        """
        Block until the MDS reaches a particular state, or a failure condition
        is met.

        When there are multiple MDSs, succeed when exactly one MDS is in the
        goal state, or fail when any MDS is in the reject state.

        :param goal_state: Return once the MDS is in this state
        :param reject: Fail if the MDS enters this state before the goal state
        :param timeout: Fail if this many seconds pass before reaching goal
        :return: number of seconds waited, rounded down to integer
        """

        started_at = time.time()
        while True:
            status = self.status()
            if rank is not None:
                mds_info = status.get_rank(self.id, rank)
                current_state = mds_info['state'] if mds_info else None
                log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
            elif mds_id is not None:
                # mds_info is None if no daemon with this ID exists in the map
                mds_info = status.get_mds(mds_id)
                current_state = mds_info['state'] if mds_info else None
                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
            else:
                # In general, look for a single MDS
                states = [m['state'] for m in status.get_ranks(self.id)]
                if [s for s in states if s == goal_state] == [goal_state]:
                    current_state = goal_state
                elif reject in states:
                    current_state = reject
                else:
                    current_state = None
                log.info("mapped states {0} to {1}".format(states, current_state))

            elapsed = time.time() - started_at
            if current_state == goal_state:
                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
                return elapsed
            elif reject is not None and current_state == reject:
                raise RuntimeError("MDS in reject state {0}".format(current_state))
            elif timeout is not None and elapsed > timeout:
                log.error("MDS status at timeout: {0}".format(status.get_fsmap(self.id)))
                raise RuntimeError(
                    "Reached timeout after {0} seconds waiting for state {1}, while in state {2}".format(
                        elapsed, goal_state, current_state
                    ))
            else:
                time.sleep(1)

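    # Usage sketch: wait up to 60 seconds for rank 0 to finish replay and go
    # active:
    #
    #   fs.wait_for_state('up:active', timeout=60, rank=0)
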
    def _read_data_xattr(self, ino_no, xattr_name, type, pool):
        mds_id = self.mds_ids[0]
        remote = self.mds_daemons[mds_id].remote
        if pool is None:
            pool = self.get_data_pool_name()

        obj_name = "{0:x}.00000000".format(ino_no)

        args = [
            os.path.join(self._prefix, "rados"), "-p", pool, "getxattr", obj_name, xattr_name
        ]
        try:
            proc = remote.run(
                args=args,
                stdout=StringIO())
        except CommandFailedError as e:
            log.error(e.__str__())
            raise ObjectNotFound(obj_name)

        data = proc.stdout.getvalue()

        p = remote.run(
            args=[os.path.join(self._prefix, "ceph-dencoder"), "type", type, "import", "-", "decode", "dump_json"],
            stdout=StringIO(),
            stdin=data
        )

        return json.loads(p.stdout.getvalue().strip())

    def _write_data_xattr(self, ino_no, xattr_name, data, pool=None):
        """
        Write to an xattr of the 0th data object of an inode.  Will
        succeed whether the object and/or xattr already exist or not.

        :param ino_no: integer inode number
        :param xattr_name: string name of the xattr
        :param data: byte array data to write to the xattr
        :param pool: name of data pool or None to use primary data pool
        :return: None
        """
        remote = self.mds_daemons[self.mds_ids[0]].remote
        if pool is None:
            pool = self.get_data_pool_name()

        obj_name = "{0:x}.00000000".format(ino_no)
        args = [
            os.path.join(self._prefix, "rados"), "-p", pool, "setxattr",
            obj_name, xattr_name, data
        ]
        remote.run(
            args=args,
            stdout=StringIO())

    def read_backtrace(self, ino_no, pool=None):
        """
        Read the backtrace from the data pool, return a dict in the format
        given by inode_backtrace_t::dump, which is something like:

        ::

            rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin
            ceph-dencoder type inode_backtrace_t import out.bin decode dump_json

            { "ino": 1099511627778,
              "ancestors": [
                    { "dirino": 1,
                      "dname": "blah",
                      "version": 11}],
              "pool": 1,
              "old_pools": []}

        :param pool: name of pool to read backtrace from.  If omitted, FS must have only
                     one data pool and that will be used.
        """
        return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool)

    def read_layout(self, ino_no, pool=None):
        """
        Read 'layout' xattr of an inode and parse the result, returning a dict like:
        ::
            {
                "stripe_unit": 4194304,
                "stripe_count": 1,
                "object_size": 4194304,
                "pool_id": 1,
                "pool_ns": "",
            }

        :param pool: name of pool to read backtrace from.  If omitted, FS must have only
                     one data pool and that will be used.
        """
        return self._read_data_xattr(ino_no, "layout", "file_layout_t", pool)

    def _enumerate_data_objects(self, ino, size):
        """
        Get the list of expected data objects for a range, and the list of objects
        that really exist.

        :return a tuple of two lists of strings (expected, actual)
        """
        stripe_size = 1024 * 1024 * 4

        size = max(stripe_size, size)

        want_objects = [
            "{0:x}.{1:08x}".format(ino, n)
            for n in range(0, ((size - 1) / stripe_size) + 1)
        ]

        exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")

        return want_objects, exist_objects

    def data_objects_present(self, ino, size):
        """
        Check that *all* the expected data objects for an inode are present in the data pool
        """

        want_objects, exist_objects = self._enumerate_data_objects(ino, size)
        missing = set(want_objects) - set(exist_objects)

        if missing:
            log.info("Objects missing (ino {0}, size {1}): {2}".format(
                ino, size, missing
            ))
            return False
        else:
            log.info("All objects for ino {0} size {1} found".format(ino, size))
            return True

    def data_objects_absent(self, ino, size):
        want_objects, exist_objects = self._enumerate_data_objects(ino, size)
        present = set(want_objects) & set(exist_objects)

        if present:
            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
                ino, size, present
            ))
            return False
        else:
            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
            return True

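    # Worked example (hypothetical inode): with the 4 MiB stripe size above,
    # ino 0x10000000002 at size 9 MiB implies ((9437184 - 1) / 4194304) + 1 = 3
    # objects: 10000000002.00000000, 10000000002.00000001, 10000000002.00000002.
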
    def dirfrag_exists(self, ino, frag):
        try:
            self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)])
        except CommandFailedError as e:
            return False
        else:
            return True

    def rados(self, args, pool=None, namespace=None, stdin_data=None,
              stdin_file=None):
        """
        Call into the `rados` CLI from an MDS
        """

        if pool is None:
            pool = self.get_metadata_pool_name()

        # Doesn't matter which MDS we use to run rados commands, they all
        # have access to the pools
        mds_id = self.mds_ids[0]
        remote = self.mds_daemons[mds_id].remote

        # NB we could alternatively use librados pybindings for this, but it's a one-liner
        # using the `rados` CLI
        args = ([os.path.join(self._prefix, "rados"), "-p", pool] +
                (["--namespace", namespace] if namespace else []) +
                args)

        if stdin_file is not None:
            args = ["bash", "-c", "cat " + stdin_file + " | " + " ".join(args)]

        p = remote.run(
            args=args,
            stdin=stdin_data,
            stdout=StringIO())
        return p.stdout.getvalue().strip()

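    # Usage sketch (hypothetical object name): list the omap keys of an object
    # in the metadata pool, from an MDS host:
    #
    #   keys = fs.rados(["listomapkeys", "1.00000000"])
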
    def list_dirfrag(self, dir_ino):
        """
        Read the named object and return the list of omap keys

        :return a list of 0 or more strings
        """

        dirfrag_obj_name = "{0:x}.00000000".format(dir_ino)

        try:
            key_list_str = self.rados(["listomapkeys", dirfrag_obj_name])
        except CommandFailedError as e:
            log.error(e.__str__())
            raise ObjectNotFound(dirfrag_obj_name)

        return key_list_str.split("\n") if key_list_str else []

    def erase_metadata_objects(self, prefix):
        """
        For all objects in the metadata pool matching the prefix,
        erase them.

        This is O(N) with the number of objects in the pool, so only suitable
        for use on toy test filesystems.
        """
        all_objects = self.rados(["ls"]).split("\n")
        matching_objects = [o for o in all_objects if o.startswith(prefix)]
        for o in matching_objects:
            self.rados(["rm", o])

    def erase_mds_objects(self, rank):
        """
        Erase all the per-MDS objects for a particular rank.  This includes
        inotable, sessiontable, journal
        """

        def obj_prefix(multiplier):
            """
            MDS object naming conventions like rank 1's
            journal is at 201.***
            """
            return "%x." % (multiplier * 0x100 + rank)

        # MDS_INO_LOG_OFFSET
        self.erase_metadata_objects(obj_prefix(2))
        # MDS_INO_LOG_BACKUP_OFFSET
        self.erase_metadata_objects(obj_prefix(3))
        # MDS_INO_LOG_POINTER_OFFSET
        self.erase_metadata_objects(obj_prefix(4))
        # MDSTables & SessionMap
        self.erase_metadata_objects("mds{rank:d}_".format(rank=rank))

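    # Worked example: for rank 1, obj_prefix(2) evaluates to
    # "%x." % (2 * 0x100 + 1) == "201.", so rank 1's journal objects
    # (201.00000000, 201.00000001, ...) are matched and removed.
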
    @property
    def _prefix(self):
        """
        Override this to set a different install prefix for the Ceph binaries.
        """
        return ""

    def _run_tool(self, tool, args, rank=None, quiet=False):
        # Tests frequently have [client] configuration that jacks up
        # the objecter log level (unlikely to be interesting here)
        # and does not set the mds log level (very interesting here)
        if quiet:
            base_args = [os.path.join(self._prefix, tool), '--debug-mds=1', '--debug-objecter=1']
        else:
            base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']

        if rank is not None:
            base_args.extend(["--rank", "%d" % rank])

        t1 = datetime.datetime.now()
        r = self.tool_remote.run(
            args=base_args + args,
            stdout=StringIO()).stdout.getvalue().strip()
        duration = datetime.datetime.now() - t1
        log.info("Ran {0} in time {1}, result:\n{2}".format(
            base_args + args, duration, r
        ))
        return r

    @property
    def tool_remote(self):
        """
        An arbitrary remote to use when invoking recovery tools.  Use an MDS host because
        it'll definitely have keys with perms to access cephfs metadata pool.  This is public
        so that tests can use this remote to go get locally written output files from the tools.
        """
        mds_id = self.mds_ids[0]
        return self.mds_daemons[mds_id].remote

    def journal_tool(self, args, rank=None, quiet=False):
        """
        Invoke cephfs-journal-tool with the passed arguments, and return its stdout
        """
        return self._run_tool("cephfs-journal-tool", args, rank, quiet)

    def table_tool(self, args, quiet=False):
        """
        Invoke cephfs-table-tool with the passed arguments, and return its stdout
        """
        return self._run_tool("cephfs-table-tool", args, None, quiet)

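    # Usage sketches (hypothetical arguments, following the tools' CLIs):
    #
    #   fs.journal_tool(["journal", "inspect"], rank=0)
    #   fs.table_tool(["all", "show", "session"])
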
    def data_scan(self, args, quiet=False, worker_count=1):
        """
        Invoke cephfs-data-scan with the passed arguments, and return its stdout

        :param worker_count: if greater than 1, multiple workers will be run
                             in parallel and the return value will be None
        """

        workers = []

        for n in range(0, worker_count):
            if worker_count > 1:
                # data-scan args first token is a command, followed by args to it.
                # insert worker arguments after the command.
                cmd = args[0]
                worker_args = [cmd] + ["--worker_n", n.__str__(), "--worker_m", worker_count.__str__()] + args[1:]
            else:
                worker_args = args

            workers.append(Greenlet.spawn(lambda wargs=worker_args:
                           self._run_tool("cephfs-data-scan", wargs, None, quiet)))

        for w in workers:
            w.get()

        if worker_count == 1:
            return workers[0].value
        else:
            return None

    def is_full(self):
        return self.is_pool_full(self.get_data_pool_name())
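
    # Usage sketch (hypothetical): run the extents scan with four parallel
    # workers; note that with worker_count > 1 the return value is None:
    #
    #   fs.data_scan(["scan_extents", fs.get_data_pool_name()], worker_count=4)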