]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_snapshots.py
40a09f3a8db2b3538165430a8d5e996d4d33e5c1
[ceph.git] / ceph / qa / tasks / cephfs / test_snapshots.py
1 import sys
2 import logging
3 import signal
4 from textwrap import dedent
5 from tasks.cephfs.fuse_mount import FuseMount
6 from tasks.cephfs.cephfs_test_case import CephFSTestCase
7 from teuthology.orchestra.run import CommandFailedError, Raw
8
9 log = logging.getLogger(__name__)
10
11 MDS_RESTART_GRACE = 60
12
13 class TestSnapshots(CephFSTestCase):
14 MDSS_REQUIRED = 3
15 LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
16
17 def _check_subtree(self, rank, path, status=None):
18 got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status)
19 for s in got_subtrees:
20 if s['dir']['path'] == path and s['auth_first'] == rank:
21 return True
22 return False
23
24 def _get_snapclient_dump(self, rank=0, status=None):
25 return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)
26
27 def _get_snapserver_dump(self, rank=0, status=None):
28 return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)
29
30 def _get_last_created_snap(self, rank=0, status=None):
31 return int(self._get_snapserver_dump(rank,status=status)["last_created"])
32
33 def _get_last_destroyed_snap(self, rank=0, status=None):
34 return int(self._get_snapserver_dump(rank,status=status)["last_destroyed"])
35
36 def _get_pending_snap_update(self, rank=0, status=None):
37 return self._get_snapserver_dump(rank,status=status)["pending_update"]
38
39 def _get_pending_snap_destroy(self, rank=0, status=None):
40 return self._get_snapserver_dump(rank,status=status)["pending_destroy"]
41
    def test_kill_mdstable(self):
        """
        Check snaptable transaction recovery.

        Injects MDS kills at chosen points of the snap-table
        prepare/commit/rollback protocol (via the mds_kill_mdstable_at
        config failpoint), restarts the daemons, and verifies the snap
        table converges: pending updates drain and last_created advances
        (or not) as each kill point dictates.
        """
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to forcibly kill mount")

        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # setup subtrees: pin d1 to rank 1 so that mksnap involves both the
        # snapserver (rank 0) and a remote snapclient (rank 1)
        self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d1", 1)], rank=1, path="/d1")

        last_created = self._get_last_created_snap(rank=0,status=status)

        # Kill the snapSERVER (rank 0) at each failpoint; in every case the
        # mkdir must eventually succeed after the restart, so last_created
        # advances exactly once per iteration.
        # mds_kill_mdstable_at:
        # 1: MDSTableServer::handle_prepare
        # 2: MDSTableServer::_prepare_logged
        # 5: MDSTableServer::handle_commit
        # 6: MDSTableServer::_commit_logged
        for i in [1,2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))

            # freeze so the mon does not fail the rank over while it is down
            status = self.fs.status()
            rank0 = self.fs.get_rank(rank=0, status=status)
            self.fs.rank_freeze(True, rank=0)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            # async mkdir — it blocks until the snap transaction completes
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
            # the injected kill dumps core; remove it so teuthology doesn't flag it
            self.delete_mds_coredump(rank0['name']);

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])
            status = self.fs.wait_for_daemons()

            # the client retries; the snapshot must land exactly once
            proc.wait()
            last_created += 1
            self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)

        self.set_conf("mds", "mds_reconnect_timeout", "5")

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Kill the snapserver AND the snapclient (and the FUSE client) so the
        # transaction outcome depends on whether the server logged the
        # prepare (2), commit request (5) or commit (6) before dying.
        # set mds_kill_mdstable_at, also kill snapclient
        for i in [2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
            status = self.fs.status()
            last_created = self._get_last_created_snap(rank=0, status=status)

            rank0 = self.fs.get_rank(rank=0, status=status)
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=0) # prevent failover...
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
            self.delete_mds_coredump(rank0['name']);

            self.fs.rank_signal(signal.SIGKILL, rank=1)

            # the client is killed too; its pending mkdir is simply lost
            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])

            # while in resolve the table state is observable but not yet settled
            self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
            if i in [2,5]:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 6:
                # commit was logged before the kill: update already applied
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

            if i in [2,5]:
                # pending update resolves: rolled back for 2, committed for 5
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 2:
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Kill only the snapCLIENT (rank 1) at each failpoint.
        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 4: MDSTableClient::commit
        # 7: MDSTableClient::handle_request (got ack)
        for i in [3,4,7]:
            log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
            last_created = self._get_last_created_snap(rank=0)

            status = self.fs.status()
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
            self.delete_mds_coredump(rank1['name']);

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            if i in [3,4]:
                # server still has the prepared-but-uncommitted update
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 7:
                # client died after the ack: transaction fully committed
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            if i in [3,4]:
                # resolve rolls back (3, client never committed) or replays
                # the commit (4, commit was journaled)
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 3:
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Combined kill: client dies after 'agree' (3), then the server is
        # killed while handling the resulting rollback (8); the restarted
        # client must re-send the rollback so the table still converges.
        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 8: MDSTableServer::handle_rollback
        log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
        last_created = self._get_last_created_snap(rank=0)

        status = self.fs.status()
        rank0 = self.fs.get_rank(rank=0, status=status)
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=0)
        self.fs.rank_freeze(True, rank=1)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
        self.delete_mds_coredump(rank1['name']);

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])

        # rollback triggers assertion
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
        self.delete_mds_coredump(rank0['name']);
        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

        # mds.1 should re-send rollback message
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        # rolled back: nothing was created
        self.assertEqual(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()
218
    def test_snapclient_cache(self):
        """
        Check that the per-rank snapclient cache stays in sync with the
        rank-0 snapserver: after mksnap/rmsnap, across MDS restarts, and
        when an MDS is killed at snap-table failpoints.
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(3)
        status = self.fs.wait_for_daemons()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Pin three subtrees to three different ranks so every rank holds a
        # snapclient cache that must track the rank-0 snapserver.
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
        self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank="all", status=status, path="/d0")

        def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
            # True iff every key in the rank's snapclient cache matches the
            # snapserver dump (the cache may be a subset of the server state)
            if cache_dump is None:
                cache_dump = self._get_snapclient_dump(rank=rank)
            for key, value in cache_dump.items():
                if value != snaps_dump[key]:
                    return False
            return True;

        # sync after mksnap
        last_created = self._get_last_created_snap(rank=0)
        self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync after rmsnap
        last_destroyed = self._get_last_destroyed_snap(rank=0)
        self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
        self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync during mds recovers: a restarted rank must rebuild a cache
        # identical to the pre-restart snapserver dump
        self.fs.rank_fail(rank=2)
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        self.fs.rank_fail(rank=0)
        self.fs.rank_fail(rank=1)
        status = self.fs.wait_for_daemons()
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # kill at MDSTableClient::handle_notify_prep (failpoint 9) on rank 2
        status = self.fs.status()
        rank2 = self.fs.get_rank(rank=2, status=status)
        self.fs.rank_freeze(True, rank=2)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
        self.delete_mds_coredump(rank2['name']);

        # mksnap should wait for notify ack from mds.2
        self.assertFalse(proc.finished);

        # mksnap should proceed after mds.2 fails
        self.fs.rank_fail(rank=2)
        self.wait_until_true(lambda: proc.finished, timeout=30);

        self.fs.mds_restart(rank2['name'])
        self.wait_for_daemon_start([rank2['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])

        # kill at MDSTableClient::commit
        # the recovering mds should sync all mds' cache when it enters resolve stage
        self.set_conf("mds", "mds_reconnect_timeout", "5")
        # NOTE(review): i only takes values 1..3 here, so the `i in [2,4]` /
        # `i in [3,4]` tests below never see i == 4 — possibly this was meant
        # to be range(1, 5); confirm intent before changing.
        for i in range(1, 4):
            status = self.fs.status()
            rank2 = self.fs.get_rank(rank=2, status=status)
            self.fs.rank_freeze(True, rank=2)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
            last_created = self._get_last_created_snap(rank=0)
            proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
            self.delete_mds_coredump(rank2['name']);

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

            # fail additional ranks on later iterations so recovery has to
            # sync caches across a varying set of restarted MDSs
            if i in [2,4]:
                self.fs.rank_fail(rank=0)
            if i in [3,4]:
                self.fs.rank_fail(rank=1)

            self.fs.rank_fail(rank=2)
            self.fs.mds_restart(rank2['name'])
            self.wait_for_daemon_start([rank2['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            # after resolve, all three snapclient caches must agree
            rank0_cache = self._get_snapclient_dump(rank=0)
            rank1_cache = self._get_snapclient_dump(rank=1)
            rank2_cache = self._get_snapclient_dump(rank=2)

            self.assertGreater(int(rank0_cache["last_created"]), last_created)
            self.assertEqual(rank0_cache, rank1_cache);
            self.assertEqual(rank0_cache, rank2_cache);

            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)

            # and the caches must match the snapserver itself
            snaps_dump = self._get_snapserver_dump(rank=0)
            self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
            self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
346
347 def test_multimds_mksnap(self):
348 """
349 check if snapshot takes effect across authority subtrees
350 """
351 self.fs.set_allow_new_snaps(True);
352 self.fs.set_max_mds(2)
353 status = self.fs.wait_for_daemons()
354
355 self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
356 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
357 self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
358 self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")
359
360 self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
361 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
362 self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
363 self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)
364
365 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
366 self.mount_a.run_shell(["rm", "-rf", "d0"])
367
368 def test_multimds_past_parents(self):
369 """
370 check if past parents are properly recorded during across authority rename
371 """
372 self.fs.set_allow_new_snaps(True);
373 self.fs.set_max_mds(2)
374 status = self.fs.wait_for_daemons()
375
376 self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
377 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
378 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
379 self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)
380
381 self.mount_a.run_shell(["mkdir", "d0/d3"])
382 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
383 snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()
384
385 self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
386 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
387 self.assertEqual(snap_name1, snap_name);
388
389 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
390 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
391 self.assertEqual(snap_name1, "");
392
393 self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
394
395 def test_multimds_hardlink(self):
396 """
397 check if hardlink snapshot works in multimds setup
398 """
399 self.fs.set_allow_new_snaps(True);
400 self.fs.set_max_mds(2)
401 status = self.fs.wait_for_daemons()
402
403 self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
404
405 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
406 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
407 self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)
408
409 self.mount_a.run_python(dedent("""
410 import os
411 open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
412 open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
413 """.format(path=self.mount_a.mountpoint)
414 ))
415
416 self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
417 self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])
418
419 self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])
420
421 self.mount_a.run_python(dedent("""
422 import os
423 open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
424 """.format(path=self.mount_a.mountpoint)
425 ))
426
427 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])
428
429 self.mount_a.run_shell(["rm", "-f", "d0/file2"])
430 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
431
432 self.mount_a.run_shell(["rm", "-f", "d1/file2"])
433 self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])
434
435 self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
436 self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
437
438 class SnapLimitViolationException(Exception):
439 failed_snapshot_number = -1
440
441 def __init__(self, num):
442 self.failed_snapshot_number = num
443
444 def get_snap_name(self, dir_name, sno):
445 sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
446 return sname
447
448 def create_snap_dir(self, sname):
449 self.mount_a.run_shell(["mkdir", sname])
450
451 def delete_dir_and_snaps(self, dir_name, snaps):
452 for sno in range(1, snaps+1, 1):
453 sname = self.get_snap_name(dir_name, sno)
454 self.mount_a.run_shell(["rmdir", sname])
455 self.mount_a.run_shell(["rmdir", dir_name])
456
457 def create_dir_and_snaps(self, dir_name, snaps):
458 self.mount_a.run_shell(["mkdir", dir_name])
459
460 for sno in range(1, snaps+1, 1):
461 sname = self.get_snap_name(dir_name, sno)
462 try:
463 self.create_snap_dir(sname)
464 except CommandFailedError as e:
465 # failing at the last mkdir beyond the limit is expected
466 if sno == snaps:
467 log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
468 raise TestSnapshots.SnapLimitViolationException(sno)
469
470 def test_mds_max_snaps_per_dir_default_limit(self):
471 """
472 Test the newly introudced option named mds_max_snaps_per_dir
473 Default snaps limit is 100
474 Test if the default number of snapshot directories can be created
475 """
476 self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
477 self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
478
479 def test_mds_max_snaps_per_dir_with_increased_limit(self):
480 """
481 Test the newly introudced option named mds_max_snaps_per_dir
482 First create 101 directories and ensure that the 101st directory
483 creation fails. Then increase the default by one and see if the
484 additional directory creation succeeds
485 """
486 # first test the default limit
487 new_limit = int(self.mds_max_snaps_per_dir)
488 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
489 try:
490 self.create_dir_and_snaps("accounts", new_limit + 1)
491 except TestSnapshots.SnapLimitViolationException as e:
492 if e.failed_snapshot_number == (new_limit + 1):
493 pass
494 # then increase the limit by one and test
495 new_limit = new_limit + 1
496 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
497 sname = self.get_snap_name("accounts", new_limit)
498 self.create_snap_dir(sname)
499 self.delete_dir_and_snaps("accounts", new_limit)
500
501 def test_mds_max_snaps_per_dir_with_reduced_limit(self):
502 """
503 Test the newly introudced option named mds_max_snaps_per_dir
504 First create 99 directories. Then reduce the limit to 98. Then try
505 creating another directory and ensure that additional directory
506 creation fails.
507 """
508 # first test the new limit
509 new_limit = int(self.mds_max_snaps_per_dir) - 1
510 self.create_dir_and_snaps("accounts", new_limit)
511 sname = self.get_snap_name("accounts", new_limit + 1)
512 # then reduce the limit by one and test
513 new_limit = new_limit - 1
514 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
515 try:
516 self.create_snap_dir(sname)
517 except CommandFailedError:
518 # after reducing limit we expect the new snapshot creation to fail
519 pass
520 self.delete_dir_and_snaps("accounts", new_limit + 1)