import sys
import logging
import signal
from textwrap import dedent
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, Raw

log = logging.getLogger(__name__)

MDS_RESTART_GRACE = 60

class TestSnapshots(CephFSTestCase):
    MDSS_REQUIRED = 3
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]

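    # The helpers below query the MDS admin socket: "dump snaps" returns a
    # rank's snapclient cache, while "dump snaps --server" returns the
    # snapserver table, whose keys include last_created, last_destroyed,
    # pending_update and pending_destroy.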
    def _check_subtree(self, rank, path, status=None):
        got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status)
        for s in got_subtrees:
            if s['dir']['path'] == path and s['auth_first'] == rank:
                return True
        return False

    def _get_snapclient_dump(self, rank=0, status=None):
        return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)

    def _get_snapserver_dump(self, rank=0, status=None):
        return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)

    def _get_last_created_snap(self, rank=0, status=None):
        return int(self._get_snapserver_dump(rank, status=status)["last_created"])

    def _get_last_destroyed_snap(self, rank=0, status=None):
        return int(self._get_snapserver_dump(rank, status=status)["last_destroyed"])

    def _get_pending_snap_update(self, rank=0, status=None):
        return self._get_snapserver_dump(rank, status=status)["pending_update"]

    def _get_pending_snap_destroy(self, rank=0, status=None):
        return self._get_snapserver_dump(rank, status=status)["pending_destroy"]

    def test_kill_mdstable(self):
        """
        check snap table transactions when the MDS is killed at various points
        """
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to forcibly kill mount")

        self.fs.set_allow_new_snaps(True)
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # setup subtrees
        self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)

        last_created = self._get_last_created_snap(rank=0, status=status)

        # mds_kill_mdstable_at:
        # 1: MDSTableServer::handle_prepare
        # 2: MDSTableServer::_prepare_logged
        # 5: MDSTableServer::handle_commit
        # 6: MDSTableServer::_commit_logged
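        # For each snapserver kill point: freeze rank 0 so the mons do not
        # replace it while it is down, arm the kill point, start a mkdir of a
        # new .snap entry, wait for rank 0 to be reported laggy, then restart
        # it and verify the snapshot creation eventually completes.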
        for i in [1,2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))

            status = self.fs.status()
            rank0 = self.fs.get_rank(rank=0, status=status)
            self.fs.rank_freeze(True, rank=0)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2)
            self.delete_mds_coredump(rank0['name'])

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])
            status = self.fs.wait_for_daemons()

            proc.wait()
            last_created += 1
            self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)

        self.set_conf("mds", "mds_reconnect_timeout", "5")

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # set mds_kill_mdstable_at, also kill snapclient
        for i in [2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
            status = self.fs.status()
            last_created = self._get_last_created_snap(rank=0, status=status)

            rank0 = self.fs.get_rank(rank=0, status=status)
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=0) # prevent failover...
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2)
            self.delete_mds_coredump(rank0['name'])

            self.fs.rank_signal(signal.SIGKILL, rank=1)

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])

            self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
            if i in [2,5]:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 6:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

            if i in [2,5]:
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 2:
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 4: MDSTableClient::commit
        # 7: MDSTableClient::handle_request (got ack)
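        # These kill points hit rank 1 (the snapclient driving the mkdir)
        # mid-transaction, and the client mount is killed as well.  After
        # rank 1 restarts, a prepare that never committed (3) must leave
        # last_created unchanged, an in-flight commit (4) must still
        # complete, and an already-acked commit (7) must leave no pending
        # update behind.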
        for i in [3,4,7]:
            log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
            last_created = self._get_last_created_snap(rank=0)

            status = self.fs.status()
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2)
            self.delete_mds_coredump(rank1['name'])

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            if i in [3,4]:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 7:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            if i in [3,4]:
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 3:
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 8: MDSTableServer::handle_rollback
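        # Combined case: rank 1 dies right after receiving the server's
        # agree (3), so the restarted server has to roll the prepare back;
        # rank 0 is armed to die inside handle_rollback (8), so mds.1 must
        # re-send the rollback once rank 0 comes back, and no snapshot is
        # ultimately created.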
        log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
        last_created = self._get_last_created_snap(rank=0)

        status = self.fs.status()
        rank0 = self.fs.get_rank(rank=0, status=status)
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=0)
        self.fs.rank_freeze(True, rank=1)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2)
        self.delete_mds_coredump(rank1['name'])

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])

        # rollback triggers assertion
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2)
        self.delete_mds_coredump(rank0['name'])
        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

        # mds.1 should re-send rollback message
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertEqual(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()

    def test_snapclient_cache(self):
        """
        check that the snapclient cache gets synced properly
        """
        self.fs.set_allow_new_snaps(True)
        self.fs.set_max_mds(3)
        status = self.fs.wait_for_daemons()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
        self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
        self.wait_until_true(lambda: self._check_subtree(2, '/d0/d2', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=5)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

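        # Helper: every entry in a rank's snapclient cache dump must match
        # the corresponding entry in the snapserver dump taken from rank 0.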
        def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
            if cache_dump is None:
                cache_dump = self._get_snapclient_dump(rank=rank)
            for key, value in cache_dump.items():
                if value != snaps_dump[key]:
                    return False
            return True

        # sync after mksnap
        last_created = self._get_last_created_snap(rank=0)
        self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

        # sync after rmsnap
        last_destroyed = self._get_last_destroyed_snap(rank=0)
        self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
        self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

        # sync during mds recovery
        self.fs.rank_fail(rank=2)
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

        self.fs.rank_fail(rank=0)
        self.fs.rank_fail(rank=1)
        status = self.fs.wait_for_daemons()
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1))
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2))

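        # The snapserver notifies every snapclient about a new snapshot and
        # waits for their acks; killing rank 2 at that point stalls the
        # mkdir below until rank 2 is failed.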
        # kill at MDSTableClient::handle_notify_prep
        status = self.fs.status()
        rank2 = self.fs.get_rank(rank=2, status=status)
        self.fs.rank_freeze(True, rank=2)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2)
        self.delete_mds_coredump(rank2['name'])

        # mksnap should wait for notify ack from mds.2
        self.assertFalse(proc.finished)

        # mksnap should proceed after mds.2 fails
        self.fs.rank_fail(rank=2)
        self.wait_until_true(lambda: proc.finished, timeout=30)

        self.fs.mds_restart(rank2['name'])
        self.wait_for_daemon_start([rank2['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])

        # kill at MDSTableClient::commit
        # the recovering mds should sync all mds' caches when it enters the resolve stage
        self.set_conf("mds", "mds_reconnect_timeout", "5")
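        # Each iteration arms kill point 4 on rank 2, kills the client mount,
        # optionally fails rank 0 and/or rank 1 as well, and then checks that
        # after recovery all three ranks hold identical snapclient caches
        # that match the snapserver once the pending update drains.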
        for i in range(1, 4):
            status = self.fs.status()
            rank2 = self.fs.get_rank(rank=2, status=status)
            self.fs.rank_freeze(True, rank=2)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
            last_created = self._get_last_created_snap(rank=0)
            proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2)
            self.delete_mds_coredump(rank2['name'])

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

            if i in [2,4]:
                self.fs.rank_fail(rank=0)
            if i in [3,4]:
                self.fs.rank_fail(rank=1)

            self.fs.rank_fail(rank=2)
            self.fs.mds_restart(rank2['name'])
            self.wait_for_daemon_start([rank2['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            rank0_cache = self._get_snapclient_dump(rank=0)
            rank1_cache = self._get_snapclient_dump(rank=1)
            rank2_cache = self._get_snapclient_dump(rank=2)

            self.assertGreater(int(rank0_cache["last_created"]), last_created)
            self.assertEqual(rank0_cache, rank1_cache)
            self.assertEqual(rank0_cache, rank2_cache)

            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)

            snaps_dump = self._get_snapserver_dump(rank=0)
            self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
            self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache))

            self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])

    def test_multimds_mksnap(self):
        """
        check that a snapshot takes effect across auth subtrees
        """
        self.fs.set_allow_new_snaps(True)
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell(["mkdir", "-p", "d0/d1"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

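        # Write a file inside the rank-1 subtree, snapshot its ancestor d0
        # (rank-0 auth), delete the live file, and verify the snapshot still
        # contains the original content.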
        self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
        self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0"])

    def test_multimds_past_parents(self):
        """
        check that past parents are properly recorded during a cross-authority rename
        """
        self.fs.set_allow_new_snaps(True)
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell(["mkdir", "d0", "d1"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

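        # Snapshot d0 while d3 lives under it, then rename d3 into the
        # rank-1 subtree: the snapshot name must still be visible in
        # d1/d3/.snap (past parent recorded) and must disappear once the
        # snapshot on d0 is removed.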
        self.mount_a.run_shell(["mkdir", "d0/d3"])
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

        self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, snap_name)

        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, "")

        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])

    def test_multimds_hardlink(self):
        """
        check that snapshots of hard-linked files work in a multi-MDS setup
        """
        self.fs.set_allow_new_snaps(True)
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell(["mkdir", "d0", "d1"])

        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30)
        self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5)

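        # Create two files under d0 (rank 0), hard-link them into d1 (rank 1),
        # snapshot d1, then overwrite or unlink the primary links and verify
        # the snapshotted links still show the original "asdf" content.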
        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
            open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
            """.format(path=self.mount_a.mountpoint)
        ))

        self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
        self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])

        self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])

        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
            """.format(path=self.mount_a.mountpoint)
        ))

        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])

        self.mount_a.run_shell(["rm", "-f", "d0/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        self.mount_a.run_shell(["rm", "-f", "d1/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])

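    # Helpers for the mds_max_snaps_per_dir tests below: create_dir_and_snaps
    # creates `snaps` snapshots under dir_name and raises
    # SnapLimitViolationException (carrying the failing ordinal) when a mkdir
    # beyond the configured limit is rejected.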
    class SnapLimitViolationException(Exception):
        failed_snapshot_number = -1

        def __init__(self, num):
            self.failed_snapshot_number = num

    def get_snap_name(self, dir_name, sno):
        sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
        return sname

    def create_snap_dir(self, sname):
        self.mount_a.run_shell(["mkdir", sname])

    def delete_dir_and_snaps(self, dir_name, snaps):
        for sno in range(1, snaps+1, 1):
            sname = self.get_snap_name(dir_name, sno)
            self.mount_a.run_shell(["rmdir", sname])
        self.mount_a.run_shell(["rmdir", dir_name])

    def create_dir_and_snaps(self, dir_name, snaps):
        self.mount_a.run_shell(["mkdir", dir_name])

        for sno in range(1, snaps+1, 1):
            sname = self.get_snap_name(dir_name, sno)
            try:
                self.create_snap_dir(sname)
            except CommandFailedError as e:
                # failing at the last mkdir beyond the limit is expected
                if sno == snaps:
                    log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
                    raise TestSnapshots.SnapLimitViolationException(sno)

    def test_mds_max_snaps_per_dir_default_limit(self):
        """
        Test the newly introduced mds_max_snaps_per_dir option.
        The default snapshot limit is 100.
        Test that the default number of snapshot directories can be created.
        """
        self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
        self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))

    def test_mds_max_snaps_per_dir_with_increased_limit(self):
        """
        Test the newly introduced mds_max_snaps_per_dir option.
        First try to create 101 snapshot directories and ensure that the
        101st creation fails. Then increase the limit by one and verify that
        the additional snapshot directory can be created.
        """
        # first test the default limit
        new_limit = int(self.mds_max_snaps_per_dir)
        self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
        try:
            self.create_dir_and_snaps("accounts", new_limit + 1)
        except TestSnapshots.SnapLimitViolationException as e:
            if e.failed_snapshot_number == (new_limit + 1):
                pass
        # then increase the limit by one and test
        new_limit = new_limit + 1
        self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
        sname = self.get_snap_name("accounts", new_limit)
        self.create_snap_dir(sname)
        self.delete_dir_and_snaps("accounts", new_limit)

    def test_mds_max_snaps_per_dir_with_reduced_limit(self):
        """
        Test the newly introduced mds_max_snaps_per_dir option.
        First create 99 snapshot directories. Then reduce the limit to 98.
        Then try creating another snapshot directory and ensure that the
        creation fails.
        """
        # first test the new limit
        new_limit = int(self.mds_max_snaps_per_dir) - 1
        self.create_dir_and_snaps("accounts", new_limit)
        sname = self.get_snap_name("accounts", new_limit + 1)
        # then reduce the limit by one and test
        new_limit = new_limit - 1
        self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
        try:
            self.create_snap_dir(sname)
        except CommandFailedError:
            # after reducing the limit we expect the new snapshot creation to fail
            pass
        self.delete_dir_and_snaps("accounts", new_limit + 1)