from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError
import errno
import time


class TestMisc(CephFSTestCase):
    CLIENTS_REQUIRED = 2

    LOAD_SETTINGS = ["mds_session_autoclose"]
    mds_session_autoclose = None

    def test_getattr_caps(self):
        """
        Check that the MDS recognizes the 'mask' parameter of the open
        request. The parameter allows the client to request caps when
        opening a file.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Require FUSE client")

        # Enable debug. The client will request CEPH_CAP_XATTR_SHARED
        # on lookup/open.
        self.mount_b.umount_wait()
        self.set_conf('client', 'client debug getattr caps', 'true')
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        # Create a file and hold it open. The MDS will issue CEPH_CAP_EXCL_*
        # to mount_a.
        p = self.mount_a.open_background("testfile")
        self.mount_b.wait_for_visible("testfile")

        # This triggers a lookup request and an open request. The debug
        # code will check if the lookup/open reply contains xattrs.
        self.mount_b.run_shell(["cat", "testfile"])

        self.mount_a.kill_background(p)

    def test_fs_new(self):
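        """
        Check that 'fs new' refuses to reuse a metadata pool that already
        contains objects unless --force is passed (it is expected to fail
        with EINVAL), and that it succeeds on a freshly created, empty
        metadata pool.
        """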
        data_pool_name = self.fs.get_data_pool_name()

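        # Take down the MDS and remove the existing filesystem so that its
        # pools can be deleted and recreated below.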
        self.fs.mds_stop()
        self.fs.mds_fail()

        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

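        # Delete and recreate the metadata pool so that it starts out empty.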
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))

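        # Write an arbitrary object into the new metadata pool so that it is
        # no longer empty.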
        dummyfile = '/etc/fstab'

        self.fs.put_metadata_object_raw("key", dummyfile)

        timeout = 10

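        # Pool statistics are not updated instantly; wait until the new
        # object shows up in the pool's df output before proceeding.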
        get_pool_df = self.fs.get_pool_df
        self.wait_until_true(
            lambda: get_pool_df(self.fs.metadata_pool_name)['objects'] > 0,
            timeout=timeout)

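        # 'fs new' should refuse to use a metadata pool that already contains
        # objects and fail with EINVAL.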
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                                self.fs.metadata_pool_name,
                                                data_pool_name)
        except CommandFailedError as e:
            self.assertEqual(e.exitstatus, errno.EINVAL)
        else:
            raise AssertionError("Expected EINVAL")

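        # With --force the same command is allowed to proceed.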
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name, "--force")

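        # Tear everything down again and recreate the metadata pool; on an
        # empty pool a plain 'fs new' (without --force) should succeed.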
        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
                                            '--yes-i-really-mean-it')

        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                            self.fs.metadata_pool_name,
                                            self.fs.metadata_pool_name,
                                            '--yes-i-really-really-mean-it')
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                            self.fs.metadata_pool_name,
                                            str(self.fs.get_pgs_per_fs_pool()))
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                            self.fs.metadata_pool_name,
                                            data_pool_name)

    def test_evict_client(self):
        """
        Check that a slow client session won't get evicted if it's the
        only session.
        """

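        # Unmount one client so only a single session remains.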
        self.mount_b.umount_wait()
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

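        # Kill the remaining client without unmounting, leaving its session
        # stale. As the only session it should not be evicted even after the
        # autoclose interval.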
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)

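        # Remount both clients so that there are two active sessions.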
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_b.mount()
        self.mount_b.wait_until_mounted()

        ls_data = self._session_list()
        self.assert_session_count(2, ls_data)

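        # Kill both clients without unmounting. This time the stale sessions
        # are not the only ones, so after the autoclose interval only one
        # session is expected to remain.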
        self.mount_a.kill()
        self.mount_b.kill()
        self.mount_a.kill_cleanup()
        self.mount_b.kill_cleanup()

        time.sleep(self.mds_session_autoclose * 1.5)
        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assert_session_count(1, ls_data)