from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time


class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

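    # Settings listed in LOAD_SETTINGS are read from the running MDS's
    # configuration during test setup and stored as same-named attributes
    # on the test case, so mds_session_autoclose below gets a real value.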
    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of an open
        request. The parameter allows the client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable the debug option. The client will request
        # CEPH_CAP_XATTR_SHARED on lookup/open.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check whether the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
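        """
        Check that 'fs new' refuses to use a metadata pool that already
        contains objects unless --force is supplied, and accepts an empty
        pool without it.
        """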
        data_pool_name = self.fs.get_data_pool_name()

        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

        dummyfile = '/etc/fstab'

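        # Write an arbitrary object into the freshly created metadata pool
        # so that it is no longer empty.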
        self.fs.put_metadata_object_raw("key", dummyfile)

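        # Pool statistics are updated asynchronously, so poll until the
        # object shows up in the df output (or the timeout expires).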
        def get_pool_df(fs, name):
            try:
                return fs.get_pool_df(name)['objects'] > 0
            except RuntimeError:
                return False

        self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)

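        # 'fs new' on the now-populated metadata pool should be rejected
        # with EINVAL unless --force is supplied.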
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

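        # Recreate the metadata pool from scratch; 'fs new' should accept
        # the empty pool without --force.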
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session
        """

        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

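        # Kill mount_a without unmounting, leaving its session stale on
        # the MDS.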
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

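        # Even after mds_session_autoclose has elapsed, the stale session
        # must survive: it is the only session.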
        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

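        # Kill both clients. This time the MDS holds more than one session,
        # so after autoclose it should have evicted all but one of the
        # stale sessions (the survivor is again the sole session).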
        self.mount_a.kill()
        self.mount_b.kill()
        self.mount_a.kill_cleanup()
        self.mount_b.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)