# ceph/qa/tasks/cephfs/test_admin.py

from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount

from tasks.cephfs.filesystem import FileLayout

class TestAdminCommands(CephFSTestCase):
    """
    Tests for administration commands.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_fs_status(self):
        """
        That the `ceph fs status` command functions.
        """

        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
        self.assertTrue("active" in s)

    def _setup_ec_pools(self, n, metadata=True, overwrites=True):
        if metadata:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
        cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
        self.fs.mon_manager.raw_cluster_cmd(*cmd)
        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
        if overwrites:
            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
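        # For reference, with an illustrative n="foo" the calls above amount to
        # roughly the following CLI commands:
        #   ceph osd pool create foo-meta 8
        #   ceph osd erasure-code-profile set foo-profile m=2 k=2 crush-failure-domain=osd
        #   ceph osd pool create foo-data 8 erasure foo-profile
        #   ceph osd pool set foo-data allow_ec_overwrites true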

    def test_add_data_pool_root(self):
        """
        That a new data pool can be added and used for the root directory.
        """

        p = self.fs.add_data_pool("foo")
        self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p))

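    # Note: set_dir_layout() applies a FileLayout to a directory; on a mounted
    # client this corresponds roughly to a directory-layout vxattr update
    # along the lines of:
    #   setfattr -n ceph.dir.layout.pool -v foo .
    # ("foo" being the data pool added in the test above).
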
    def test_add_data_pool_subdir(self):
        """
        That a new data pool can be added and used for a sub-directory.
        """

        p = self.fs.add_data_pool("foo")
        self.mount_a.run_shell("mkdir subdir")
        self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p))

    def test_add_data_pool_ec(self):
        """
        That a new EC data pool can be added.
        """

        n = "test_add_data_pool_ec"
        self._setup_ec_pools(n, metadata=False)
        p = self.fs.add_data_pool(n+"-data", create=False)

    def test_new_default_ec(self):
        """
        That a new file system warns/fails with an EC default data pool.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:  # EINVAL
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

    def test_new_default_ec_force(self):
        """
        That a new file system succeeds with an EC default data pool when --force is used.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")

    def test_new_default_ec_no_overwrite(self):
        """
        That a new file system fails with an EC default data pool that does not
        allow overwrites.
        """

        self.fs.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
        except CommandFailedError as e:
            if e.exitstatus == 22:  # EINVAL
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")
        # ...and it fails even with --force
        try:
            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
        except CommandFailedError as e:
            if e.exitstatus == 22:  # EINVAL
                pass
            else:
                raise
        else:
            raise RuntimeError("expected failure")

class TestConfigCommands(CephFSTestCase):
    """
    Test that daemons and clients respond to the otherwise rarely-used
    runtime config modification operations.
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1

    def test_ceph_config_show(self):
        """
        That I can successfully show MDS configuration.
        """

        names = self.fs.get_rank_names()
        for n in names:
            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
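            # This runs the equivalent of `ceph config show mds.<name>`; the
            # output is expected to include a NAME column header and to list
            # the mon_host option, which the assertions below check.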
            self.assertTrue("NAME" in s)
            self.assertTrue("mon_host" in s)

    def test_client_config(self):
        """
        That I can successfully issue asok "config set" commands.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Test only applies to FUSE clients")

        test_key = "client_cache_size"
        test_val = "123"
        self.mount_a.admin_socket(['config', 'set', test_key, test_val])
        out = self.mount_a.admin_socket(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)
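        # The admin_socket() calls above go through the client's admin socket;
        # on the client host this is roughly equivalent to something like
        # (socket path illustrative):
        #   ceph --admin-daemon /var/run/ceph/ceph-client.<id>.asok config set client_cache_size 123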

        self.mount_a.write_n_mb("file.bin", 1)

        # Implicitly asserting that shutdown is free of lockdep errors
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_asok(self):
        """
        That an MDS config value can be set and read back via the admin socket.
        """
        test_key = "mds_max_purge_ops"
        test_val = "123"
        self.fs.mds_asok(['config', 'set', test_key, test_val])
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that shutdown is free of lockdep errors
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()

    def test_mds_config_tell(self):
        """
        That an MDS config value can be set with `tell ... injectargs` and read
        back via the admin socket.
        """
        test_key = "mds_max_purge_ops"
        test_val = "123"

        mds_id = self.fs.get_lone_mds_id()
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
                                            "--{0}={1}".format(test_key, test_val))

        # Read it back with asok because there is no `tell` equivalent
        out = self.fs.mds_asok(['config', 'get', test_key])
        self.assertEqual(out[test_key], test_val)

        # Implicitly asserting that shutdown is free of lockdep errors
        self.mount_a.umount_wait(require_clean=True)
        self.fs.mds_stop()