from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time
import json


class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

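    # Note: LOAD_SETTINGS is a CephFSTestCase convention; the named config
    # values are read from the running MDS during test setup and stored as
    # attributes (here, mds_session_autoclose).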
    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None

    def test_getattr_caps(self):
        """
        Check if the MDS recognizes the 'mask' parameter of an open request.
        The parameter allows the client to request caps when opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. Client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
        # to mount_a
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # this triggers a lookup request and an open request. The debug
        # code will check if the lookup/open reply contains xattrs
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
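        """
        Check that 'fs new' refuses a metadata pool that already contains
        objects unless --force is given, and succeeds again once the pool
        has been recreated empty.
        """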
        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

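        # Put an object into the freshly created metadata pool so that it is
        # no longer empty when 'fs new' inspects it below.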
        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

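        # Pool statistics are updated asynchronously, so poll until the
        # object written above is reflected in the pool's df output.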
        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name),
                             timeout=30)

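        # Creating a filesystem on a metadata pool that already contains
        # objects should be rejected with EINVAL unless --force is supplied.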
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

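        # mds_session_autoclose is the timeout after which the MDS may evict
        # an unresponsive client; sleeping 1.5x that gives it time to act.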
        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

        self.mount_a.kill()
        self.mount_b.kill()
        self.mount_a.kill_cleanup()
        self.mount_b.kill_cleanup()

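        # With both clients unresponsive the MDS should evict the stale
        # sessions, but not the last remaining one, so a single session
        # survives the timeout below.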
        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

    def test_filtered_df(self):
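        """
        Check that the free space reported to the client by df accounts for
        pool replication: the raw max_avail of the data pool divided by its
        replica count should match the client-visible available space.
        """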
        pool_name = self.fs.get_data_pool_name()
        raw_df = self.fs.get_pool_df(pool_name)
        raw_avail = float(raw_df["max_avail"])
        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                  pool_name, 'size',
                                                  '-f', 'json-pretty')
        j = json.loads(out)
        pool_size = int(j['size'])

        proc = self.mount_a.run_shell(['df', '.'])
        output = proc.stdout.getvalue()
        fs_avail = output.split('\n')[1].split()[3]
        fs_avail = float(fs_avail) * 1024

        ratio = (raw_avail / pool_size) / fs_avail
        assert 0.9 < ratio < 1.1