# ceph/qa/tasks/cephfs/test_snapshots.py (as shipped in ceph v16.2.7)
1 import errno
2 import logging
3 import signal
4 from textwrap import dedent
5 from tasks.cephfs.fuse_mount import FuseMount
6 from tasks.cephfs.cephfs_test_case import CephFSTestCase
7 from teuthology.orchestra.run import CommandFailedError, Raw
8
9 log = logging.getLogger(__name__)
10
11 MDS_RESTART_GRACE = 60
12
class TestSnapshots(CephFSTestCase):
    """
    Exercise CephFS snapshot behaviour: snaptable transactions across MDS
    kills and restarts, snapclient cache synchronization, multi-MDS
    snapshots/hardlinks, and the mds_max_snaps_per_dir limit.
    """
    MDSS_REQUIRED = 3  # several tests pin subtrees to ranks 0..2
    LOAD_SETTINGS = ["mds_max_snaps_per_dir"]  # loaded as self.mds_max_snaps_per_dir
16
17 def _check_subtree(self, rank, path, status=None):
18 got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status)
19 for s in got_subtrees:
20 if s['dir']['path'] == path and s['auth_first'] == rank:
21 return True
22 return False
23
24 def _get_snapclient_dump(self, rank=0, status=None):
25 return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)
26
27 def _get_snapserver_dump(self, rank=0, status=None):
28 return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)
29
30 def _get_last_created_snap(self, rank=0, status=None):
31 return int(self._get_snapserver_dump(rank,status=status)["last_created"])
32
33 def _get_last_destroyed_snap(self, rank=0, status=None):
34 return int(self._get_snapserver_dump(rank,status=status)["last_destroyed"])
35
36 def _get_pending_snap_update(self, rank=0, status=None):
37 return self._get_snapserver_dump(rank,status=status)["pending_update"]
38
39 def _get_pending_snap_destroy(self, rank=0, status=None):
40 return self._get_snapserver_dump(rank,status=status)["pending_destroy"]
41
42 def test_allow_new_snaps_config(self):
43 """
44 Check whether 'allow_new_snaps' setting works
45 """
46 self.mount_a.run_shell(["mkdir", "test-allow-snaps"])
47
48 self.fs.set_allow_new_snaps(False);
49 try:
50 self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
51 except CommandFailedError as ce:
52 self.assertEqual(ce.exitstatus, errno.EPERM, "expected EPERM")
53 else:
54 self.fail("expected snap creatiion to fail")
55
56 self.fs.set_allow_new_snaps(True);
57 self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
58 self.mount_a.run_shell(["rmdir", "test-allow-snaps/.snap/snap00"])
59 self.mount_a.run_shell(["rmdir", "test-allow-snaps"])
60
    def test_kill_mdstable(self):
        """
        Check snaptable transaction recovery: kill the MDS (snapserver
        and/or snapclient side) at various points of a snaptable update
        via mds_kill_mdstable_at, then verify the table settles to a
        consistent state after restart.
        """
        # mount is forcibly killed below; only FUSE supports that here
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to forcibly kill mount")

        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # used to size the wait for an MDS to be reported laggy
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # setup subtrees: rank 0 is the snapserver, d1 is auth on rank 1
        self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d1", 1)], rank=1, path="/d1")

        last_created = self._get_last_created_snap(rank=0,status=status)

        # Phase 1: kill only the snapserver (rank 0).
        # mds_kill_mdstable_at:
        #  1: MDSTableServer::handle_prepare
        #  2: MDSTableServer::_prepare_logged
        #  5: MDSTableServer::handle_commit
        #  6: MDSTableServer::_commit_logged
        for i in [1,2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))

            status = self.fs.status()
            rank0 = self.fs.get_rank(rank=0, status=status)
            # freeze so the mon does not fail rank 0 over while it is down
            self.fs.rank_freeze(True, rank=0)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            # mkdir in .snap triggers the snaptable update; run async since
            # it will block until recovery completes
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
            # the kill is intentional; drop the coredump so teuthology
            # does not flag the run as failed
            self.delete_mds_coredump(rank0['name']);

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])
            status = self.fs.wait_for_daemons()

            # after recovery the mkdir must complete and the snapid advance
            proc.wait()
            last_created += 1
            self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)

        # speed up reconnect for the client-kill phases below
        self.set_conf("mds", "mds_reconnect_timeout", "5")

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Phase 2: kill the snapserver AND the snapclient (rank 1) plus the
        # FUSE client, then bring everything back and check table state.
        for i in [2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
            status = self.fs.status()
            last_created = self._get_last_created_snap(rank=0, status=status)

            rank0 = self.fs.get_rank(rank=0, status=status)
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=0) # prevent failover...
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*3);
            self.delete_mds_coredump(rank0['name']);

            self.fs.rank_signal(signal.SIGKILL, rank=1)

            # kill the client too so its pending mkdir doesn't replay
            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])

            # rank 0 stays in resolve until rank 1 returns; inspect the
            # table while it is still partially recovered
            self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
            if i in [2,5]:
                # killed before commit was logged: update still pending
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 6:
                # killed after commit was logged: update already applied
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

            if i in [2,5]:
                # pending update must resolve once both ranks are active
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 2:
                    # prepare was never logged -> transaction rolled back
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    # prepare was logged -> transaction committed
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Phase 3: kill only the snapclient (rank 1) and the FUSE client.
        # mds_kill_mdstable_at:
        #  3: MDSTableClient::handle_request (got agree)
        #  4: MDSTableClient::commit
        #  7: MDSTableClient::handle_request (got ack)
        for i in [3,4,7]:
            log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
            last_created = self._get_last_created_snap(rank=0)

            status = self.fs.status()
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
            self.delete_mds_coredump(rank1['name']);

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            if i in [3,4]:
                # client died before commit completed: server keeps it pending
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 7:
                # ack received: transaction fully committed
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            if i in [3,4]:
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 3:
                    # client never committed -> rolled back
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    # commit was sent -> applied
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # Phase 4: kill both sides of a rollback.
        # mds_kill_mdstable_at:
        #  3: MDSTableClient::handle_request (got agree)
        #  8: MDSTableServer::handle_rollback
        log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
        last_created = self._get_last_created_snap(rank=0)

        status = self.fs.status()
        rank0 = self.fs.get_rank(rank=0, status=status)
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=0)
        self.fs.rank_freeze(True, rank=1)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
        self.delete_mds_coredump(rank1['name']);

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])

        # rollback triggers assertion (kill point 8 fires on rank 0)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2);
        self.delete_mds_coredump(rank0['name']);
        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

        # mds.1 should re-send rollback message
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        # rolled back: no new snapid was created
        self.assertEqual(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()
237
    def test_snapclient_cache(self):
        """
        Check that every MDS rank's snapclient cache stays in sync with
        the rank-0 snapserver table: after mksnap/rmsnap, across rank
        restarts, and when an MDS is killed mid-transaction.
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(3)
        status = self.fs.wait_for_daemons()

        # used to size the wait for an MDS to be reported laggy
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # pin three subtrees to three different ranks so all of them
        # participate as snapclients
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
        self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank="all", status=status, path="/d0")

        def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
            # True iff every entry in the rank's snapclient cache matches
            # the snapserver dump
            if cache_dump is None:
                cache_dump = self._get_snapclient_dump(rank=rank)
            for key, value in cache_dump.items():
                if value != snaps_dump[key]:
                    return False
            return True;

        # sync after mksnap
        last_created = self._get_last_created_snap(rank=0)
        self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync after rmsnap
        last_destroyed = self._get_last_destroyed_snap(rank=0)
        self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
        self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync during mds recovers: a restarted rank must rebuild the
        # same cache contents
        self.fs.rank_fail(rank=2)
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        self.fs.rank_fail(rank=0)
        self.fs.rank_fail(rank=1)
        status = self.fs.wait_for_daemons()
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # kill at MDSTableClient::handle_notify_prep (kill point 9 on rank 2)
        status = self.fs.status()
        rank2 = self.fs.get_rank(rank=2, status=status)
        self.fs.rank_freeze(True, rank=2)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
        # the kill is intentional; discard the coredump
        self.delete_mds_coredump(rank2['name']);

        # mksnap should wait for notify ack from mds.2
        self.assertFalse(proc.finished);

        # mksnap should proceed after mds.2 fails
        self.fs.rank_fail(rank=2)
        self.wait_until_true(lambda: proc.finished, timeout=30);

        self.fs.mds_restart(rank2['name'])
        self.wait_for_daemon_start([rank2['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])

        # kill at MDSTableClient::commit
        # the recovering mds should sync all mds' cache when it enters resolve stage
        self.set_conf("mds", "mds_reconnect_timeout", "5")
        for i in range(1, 4):
            status = self.fs.status()
            rank2 = self.fs.get_rank(rank=2, status=status)
            self.fs.rank_freeze(True, rank=2)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
            last_created = self._get_last_created_snap(rank=0)
            proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2);
            self.delete_mds_coredump(rank2['name']);

            # also drop the client so its pending mkdir doesn't replay
            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

            # vary which additional ranks restart on each iteration
            if i in [2,4]:
                self.fs.rank_fail(rank=0)
            if i in [3,4]:
                self.fs.rank_fail(rank=1)

            self.fs.rank_fail(rank=2)
            self.fs.mds_restart(rank2['name'])
            self.wait_for_daemon_start([rank2['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            rank0_cache = self._get_snapclient_dump(rank=0)
            rank1_cache = self._get_snapclient_dump(rank=1)
            rank2_cache = self._get_snapclient_dump(rank=2)

            # after resolve, all ranks must agree and reflect the new snap
            self.assertGreater(int(rank0_cache["last_created"]), last_created)
            self.assertEqual(rank0_cache, rank1_cache);
            self.assertEqual(rank0_cache, rank2_cache);

            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)

            snaps_dump = self._get_snapserver_dump(rank=0)
            self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
            self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
365
366 def test_multimds_mksnap(self):
367 """
368 check if snapshot takes effect across authority subtrees
369 """
370 self.fs.set_allow_new_snaps(True);
371 self.fs.set_max_mds(2)
372 status = self.fs.wait_for_daemons()
373
374 self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
375 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
376 self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
377 self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")
378
379 self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
380 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
381 self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
382 self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)
383
384 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
385 self.mount_a.run_shell(["rm", "-rf", "d0"])
386
387 def test_multimds_past_parents(self):
388 """
389 check if past parents are properly recorded during across authority rename
390 """
391 self.fs.set_allow_new_snaps(True);
392 self.fs.set_max_mds(2)
393 status = self.fs.wait_for_daemons()
394
395 self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
396 self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
397 self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
398 self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)
399
400 self.mount_a.run_shell(["mkdir", "d0/d3"])
401 self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
402 snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()
403
404 self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
405 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
406 self.assertEqual(snap_name1, snap_name);
407
408 self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
409 snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
410 self.assertEqual(snap_name1, "");
411
412 self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
413
    def test_multimds_hardlink(self):
        """
        Check hardlink snapshots in a multimds setup: files created under
        d0 (rank 0) are hardlinked into d1 (rank 1), d1 is snapshotted,
        and the snapshot must preserve the original content even after
        the live files are rewritten or unlinked.
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")

        # pin d0 and d1 to different ranks
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

        # create the link sources under d0
        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
            open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
            """.format(path=self.mount_a.mountpoint)
        ))

        # hardlink both files into the other rank's subtree
        self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
        self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])

        # snapshot the subtree that holds the hardlinks
        self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])

        # rewrite file1 through its original (d0) path
        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
            """.format(path=self.mount_a.mountpoint)
        ))

        # snapshot must still hold the pre-rewrite content
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])

        # unlinking one link must not affect the snapshotted content
        self.mount_a.run_shell(["rm", "-f", "d0/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        # nor does unlinking the last remaining link
        self.mount_a.run_shell(["rm", "-f", "d1/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
456
457 class SnapLimitViolationException(Exception):
458 failed_snapshot_number = -1
459
460 def __init__(self, num):
461 self.failed_snapshot_number = num
462
463 def get_snap_name(self, dir_name, sno):
464 sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
465 return sname
466
467 def create_snap_dir(self, sname):
468 self.mount_a.run_shell(["mkdir", sname])
469
470 def delete_dir_and_snaps(self, dir_name, snaps):
471 for sno in range(1, snaps+1, 1):
472 sname = self.get_snap_name(dir_name, sno)
473 self.mount_a.run_shell(["rmdir", sname])
474 self.mount_a.run_shell(["rmdir", dir_name])
475
476 def create_dir_and_snaps(self, dir_name, snaps):
477 self.mount_a.run_shell(["mkdir", dir_name])
478
479 for sno in range(1, snaps+1, 1):
480 sname = self.get_snap_name(dir_name, sno)
481 try:
482 self.create_snap_dir(sname)
483 except CommandFailedError as e:
484 # failing at the last mkdir beyond the limit is expected
485 if sno == snaps:
486 log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
487 raise TestSnapshots.SnapLimitViolationException(sno)
488
489 def test_mds_max_snaps_per_dir_default_limit(self):
490 """
491 Test the newly introudced option named mds_max_snaps_per_dir
492 Default snaps limit is 100
493 Test if the default number of snapshot directories can be created
494 """
495 self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
496 self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
497
498 def test_mds_max_snaps_per_dir_with_increased_limit(self):
499 """
500 Test the newly introudced option named mds_max_snaps_per_dir
501 First create 101 directories and ensure that the 101st directory
502 creation fails. Then increase the default by one and see if the
503 additional directory creation succeeds
504 """
505 # first test the default limit
506 new_limit = int(self.mds_max_snaps_per_dir)
507 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
508 try:
509 self.create_dir_and_snaps("accounts", new_limit + 1)
510 except TestSnapshots.SnapLimitViolationException as e:
511 if e.failed_snapshot_number == (new_limit + 1):
512 pass
513 # then increase the limit by one and test
514 new_limit = new_limit + 1
515 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
516 sname = self.get_snap_name("accounts", new_limit)
517 self.create_snap_dir(sname)
518 self.delete_dir_and_snaps("accounts", new_limit)
519
520 def test_mds_max_snaps_per_dir_with_reduced_limit(self):
521 """
522 Test the newly introudced option named mds_max_snaps_per_dir
523 First create 99 directories. Then reduce the limit to 98. Then try
524 creating another directory and ensure that additional directory
525 creation fails.
526 """
527 # first test the new limit
528 new_limit = int(self.mds_max_snaps_per_dir) - 1
529 self.create_dir_and_snaps("accounts", new_limit)
530 sname = self.get_snap_name("accounts", new_limit + 1)
531 # then reduce the limit by one and test
532 new_limit = new_limit - 1
533 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
534 try:
535 self.create_snap_dir(sname)
536 except CommandFailedError:
537 # after reducing limit we expect the new snapshot creation to fail
538 pass
539 self.delete_dir_and_snaps("accounts", new_limit + 1)