+from io import StringIO
-from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+from teuthology.exceptions import CommandFailedError
import errno
import time
import json
import logging
-import time
log = logging.getLogger(__name__)
class TestMisc(CephFSTestCase):
CLIENTS_REQUIRED = 2
+ def test_statfs_on_deleted_fs(self):
+ """
+ That statfs does not cause monitors to SIGSEGV after fs deletion.
+ """
+
+ self.mount_b.umount_wait()
+ self.mount_a.run_shell_payload("stat -f .")
+ self.fs.delete_all_filesystems()
+ # This will hang either way; run it in the background.
+ p = self.mount_a.run_shell_payload("stat -f .", wait=False, timeout=60, check_status=False)
+ time.sleep(30)
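+ # the statfs should still be outstanding; the real check is that the
+ # monitors have survived up to this point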
+ self.assertFalse(p.finished)
+ # The process is stuck in uninterruptible sleep, so just kill the mount.
+ self.mount_a.umount_wait(force=True)
+ p.wait()
+
def test_getattr_caps(self):
"""
Check if the MDS recognizes the 'mask' parameter of the open request.
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client")
+ self.skipTest("Require FUSE client")
# Enable debug. Client will request CEPH_CAP_XATTR_SHARED
# on lookup/open
self.mount_b.umount_wait()
self.set_conf('client', 'client debug getattr caps', 'true')
- self.mount_b.mount()
- self.mount_b.wait_until_mounted()
+ self.mount_b.mount_wait()
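+ # (mount_wait() folds the old mount() + wait_until_mounted() pair into one call)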
# create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
# to mount_a
t = time.time()
rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
log.info("rctime = {}".format(rctime))
- self.assertGreaterEqual(rctime, t-10)
+ self.assertGreaterEqual(float(rctime), t - 10)
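+ # (getfattr returns a string, hence the float() coercion)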
def test_fs_new(self):
self.mount_a.umount_wait()
data_pool_name = self.fs.get_data_pool_name()
- self.fs.mds_stop()
- self.fs.mds_fail()
+ self.fs.fail()
self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
'--yes-i-really-mean-it')
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
self.fs.metadata_pool_name,
self.fs.metadata_pool_name,
'--yes-i-really-really-mean-it')
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
self.fs.metadata_pool_name,
- self.fs.get_pgs_per_fs_pool().__str__())
-
- dummyfile = '/etc/fstab'
- self.fs.put_metadata_object_raw("key", dummyfile)
+ '--pg_num_min', str(self.fs.pg_num_min))
+ # insert a garbage object
+ self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
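+ # a non-empty metadata pool should make a plain 'fs new' refuse to reuse
+ # it; the --force passed below overrides that check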
def get_pool_df(fs, name):
try:
return fs.get_pool_df(name)['objects'] > 0
- except RuntimeError as e:
+ except RuntimeError:
return False
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
self.fs.metadata_pool_name,
data_pool_name, "--force")
+ self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)
+
self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
'--yes-i-really-mean-it')
-
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
self.fs.metadata_pool_name,
self.fs.metadata_pool_name,
'--yes-i-really-really-mean-it')
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
self.fs.metadata_pool_name,
- self.fs.get_pgs_per_fs_pool().__str__())
+ '--pg_num_min', str(self.fs.pg_num_min))
self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
self.fs.metadata_pool_name,
data_pool_name)
self.mount_b.wait_for_visible()
# Simulate client death
- self.mount_a.kill()
+ self.mount_a.suspend_netns()
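+ # (suspending the client's network namespace makes it look dead to the
+ # MDS without tearing down the mount)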
try:
# The waiter should get stuck waiting for the capability
self.assertTrue(session_timeout / 2.0 <= cap_waited <= session_timeout * 2.0,
"Capability handover took {0}, expected approx {1}".format(
cap_waited, session_timeout
))
- self.assertTrue(self.mount_a.is_blacklisted())
- cap_holder.stdin.close()
- try:
- cap_holder.wait()
- except (CommandFailedError, ConnectionLostError):
- # We killed it (and possibly its node), so it raises an error
- pass
+ self.assertTrue(self.mds_cluster.is_addr_blocklisted(
+ self.mount_a.get_global_addr()))
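+ # ("blocklist" is the new name for the OSD blacklist)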
+ self.mount_a._kill_background(cap_holder)
finally:
- self.mount_a.kill_cleanup()
-
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.resume_netns()
def test_filtered_df(self):
pool_name = self.fs.get_data_pool_name()
out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
pool_name, 'size',
'-f', 'json-pretty')
- j = json.loads(out)
- pool_size = int(j['size'])
+ _ = json.loads(out)
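+ # (the parse above only sanity-checks the JSON output; its value is unused)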
proc = self.mount_a.run_shell(['df', '.'])
output = proc.stdout.getvalue()
info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
assert info['path'] == "/foo"
+ def test_fs_lsflags(self):
+ """
+ Check that 'fs lsflags' displays both the default state and the newly set state of the flags.
+ """
+ # Set some flags
+ self.fs.set_joinable(False)
+ self.fs.set_allow_new_snaps(False)
+ self.fs.set_allow_standby_replay(True)
+
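+ # allow_multimds_snaps is left untouched above; its assertion below
+ # checks the default reported by lsflags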
+ lsflags = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'lsflags',
+ self.fs.name,
+ "--format=json-pretty"))
+ self.assertEqual(lsflags["joinable"], False)
+ self.assertEqual(lsflags["allow_snaps"], False)
+ self.assertEqual(lsflags["allow_multimds_snaps"], True)
+ self.assertEqual(lsflags["allow_standby_replay"], True)
class TestCacheDrop(CephFSTestCase):
CLIENTS_REQUIRED = 1
def _run_drop_cache_cmd(self, timeout=None):
- result = None
- mds_id = self.fs.get_lone_mds_id()
+ args = ["cache", "drop"]
if timeout is not None:
- result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
- "cache", "drop", str(timeout))
- else:
- result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
- "cache", "drop")
- return json.loads(result)
+ args.append(str(timeout))
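+ # rank_tell() returns the already-decoded JSON reply, so no json.loads() is needed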
+ return self.fs.rank_tell(args)
def _setup(self, max_caps=20, threshold=400):
# create some files
here.
"""
self._setup()
- self.mount_a.kill()
+ self.mount_a.suspend_netns()
# Note: recall is subject to the timeout. The journal flush will
# be delayed due to the client being dead.
result = self._run_drop_cache_cmd(timeout=5)
# particular operation causing this is journal flush which causes the
# MDS to wait for cap revoke.
#self.assertEqual(0, result['trim_cache']['trimmed'])
- self.mount_a.kill_cleanup()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.resume_netns()
def test_drop_cache_command_dead(self):
"""
here.
"""
self._setup()
- self.mount_a.kill()
+ self.mount_a.suspend_netns()
result = self._run_drop_cache_cmd()
self.assertEqual(result['client_recall']['return_code'], 0)
self.assertEqual(result['flush_journal']['return_code'], 0)
# stale session will be autoclosed at mdsmap['session_timeout']). The
# particular operation causing this is journal flush which causes the
# MDS to wait for cap revoke.
- self.mount_a.kill_cleanup()
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.resume_netns()