ceph/qa/tasks/cephfs/test_misc.py

from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time


class TestMisc(CephFSTestCase):
    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None
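    # mds_session_autoclose is filled in from the running MDS's configuration
    # at setup time via LOAD_SETTINGS (CephFSTestCase behaviour); it is used
    # below as the session timeout in test_evict_client.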

    def test_getattr_caps(self):
        """
        Check if MDS recognizes the 'mask' parameter of open request.
        The parameter allows client to request caps when opening file
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. Client will request CEPH_CAP_XATTR_SHARED
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # this triggers a lookup request and an open request. The debug
        # code will check if lookup/open reply contains xattrs
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
        data_pool_name = self.fs.get_data_pool_name()
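
        # The test removes the filesystem and recreates the metadata pool, so
        # that 'fs new' can be exercised against both a non-empty and an empty
        # metadata pool.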
        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            self.fs.get_pgs_per_fs_pool().__str__())

        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)
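
        # Writing an object makes the freshly created metadata pool non-empty,
        # which is the condition the next 'fs new' call is expected to reject.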

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)
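
        # 'fs new' on a metadata pool that already contains objects should be
        # refused with EINVAL.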
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")
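
        # With --force the same command is expected to succeed despite the
        # non-empty metadata pool.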
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')
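
        # Recreate the metadata pool empty; a plain 'fs new' (no --force)
        # should now succeed.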
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            self.fs.get_pgs_per_fs_pool().__str__())
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)
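
        # Make the only client unresponsive; its session should survive past
        # mds_session_autoclose because there is no other session to serve.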
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)
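
        # With a second, healthy session present, an unresponsive client is
        # expected to be evicted once mds_session_autoclose elapses.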
        self.mount_b.kill()
        self.mount_b.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)