from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
import errno
import time
import json
import logging

log = logging.getLogger(__name__)

class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of the open
        request. The parameter allows a client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open.
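        # The client only reads this option at startup, so remount mount_b
        # for it to take effect.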
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check whether the lookup/open replies contain xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
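        """
        Check that 'fs new' refuses to reuse a metadata pool that already
        contains objects unless --force is supplied.
        """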
        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

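        # Write an arbitrary object into the freshly created metadata pool so
        # that it is no longer empty.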
        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)

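        # Without --force, creating a filesystem on a non-empty metadata pool
        # should be rejected with EINVAL.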
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

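        # With --force, the same command should succeed.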
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

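        # Clean up: remove the forcibly created filesystem and recreate it on
        # a brand-new, empty metadata pool, which should succeed without
        # --force.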
        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

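        # Phase 1: unmount mount_b so mount_a holds the only session; killing
        # it must not lead to eviction even after mds_session_autoclose has
        # passed.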
        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

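        # Phase 2: remount both clients; with two sessions, a killed client
        # should be autoclosed once mds_session_autoclose has passed.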
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

    def test_cap_revoke_nonresponder(self):
        """
        Check that a client is evicted if it has not responded to a cap
        revoke request for the configured number of seconds.
        """
        session_timeout = self.fs.get_var("session_timeout")
        eviction_timeout = session_timeout / 2.0

        self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
                          str(eviction_timeout)])

        cap_holder = self.mount_a.open_background()

        # Wait for the file to be visible from another client, indicating
        # that mount_a has completed its network ops
        self.mount_b.wait_for_visible()

        # Simulate client death
        self.mount_a.kill()

        try:
            # The waiter should get stuck waiting for the capability
            # held on the MDS by the now-dead client A
            cap_waiter = self.mount_b.write_background()

            a = time.time()
            time.sleep(eviction_timeout)
            cap_waiter.wait()
            b = time.time()
            cap_waited = b - a
            log.info("cap_waiter waited {0}s".format(cap_waited))

            # Check that the cap is transferred before the session timeout
            # kicks in. This is a good enough check to ensure that the client
            # got evicted by the cap auto-evicter rather than transitioning to
            # the stale state and then getting evicted.
            self.assertLess(cap_waited, session_timeout,
                            "Capability handover took {0}, expected less than {1}".format(
                                cap_waited, session_timeout
                            ))

            cap_holder.stdin.close()
            try:
                cap_holder.wait()
            except (CommandFailedError, ConnectionLostError):
                # We killed it (and possibly its node), so it raises an error
                pass
        finally:
            self.mount_a.kill_cleanup()

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

    def test_filtered_df(self):
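        """
        Check that 'df' on a CephFS mount reports available space in line
        with the data pool's max_avail.
        """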
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        j = json.loads(out)
        pool_size = int(j['size'])

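        # 'df' reports 1K blocks; the fourth field of the data row is the
        # available space, converted back to bytes below for comparison.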
        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

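        # The mount's reported free space should be within 10% of the data
        # pool's max_avail.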
        ratio = raw_avail / fs_avail
        assert 0.9 < ratio < 1.1