ceph/qa/tasks/cephfs/test_misc.py

from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time


class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None
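    # mds_session_autoclose is populated by CephFSTestCase during setUp,
    # which queries the running MDS for each config name in LOAD_SETTINGS.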

    def test_getattr_caps(self):
        """
        Check if the MDS recognizes the 'mask' parameter of the open request.
        The parameter allows the client to request caps when opening a file.
        """
        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check if the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()
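
        # 'fs rm' refuses to act while MDS daemons are still active, hence
        # the stop/fail above.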
        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
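
        # Seed the fresh metadata pool with a stray object so that it no
        # longer looks empty to 'fs new'.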
        dummyfile = '/etc/fstab'
        self.fs.put_metadata_object_raw("key", dummyfile)
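
        # Pool stats update asynchronously; wait until the metadata pool
        # reports a nonzero object count.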
        get_pool_df = self.fs.get_pool_df
        self.wait_until_true(
            lambda: get_pool_df(self.fs.metadata_pool_name)['objects'] > 0,
            timeout=30)
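
        # 'fs new' must refuse to reuse a metadata pool that already
        # contains objects, so this is expected to fail with EINVAL.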
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")
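
        # With --force, the same command should succeed despite the
        # non-empty metadata pool.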
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')
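
        # Rebuild the metadata pool from scratch so that it is genuinely
        # empty this time.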
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
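
        # A freshly created, empty metadata pool should be accepted by
        # 'fs new' without --force.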
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session.
        """

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)
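
        # Simulate a client dying without unmounting: kill the client
        # process and clean up locally, leaving its MDS session behind.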
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)
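
        # The stale session survived because it was the only one. Remount
        # both clients so that there are two sessions.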
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)
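
        # With two sessions, a dead client is no longer protected: the MDS
        # should evict it once mds_session_autoclose expires.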
        self.mount_b.kill()
        self.mount_b.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)