]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_snapshots.py
bc3e6a16c8a5a760d05e71f5262525fc59f4469c
[ceph.git] / ceph / qa / tasks / cephfs / test_snapshots.py
1 import errno
2 import logging
3 import signal
4 from textwrap import dedent
5 from tasks.cephfs.fuse_mount import FuseMount
6 from tasks.cephfs.cephfs_test_case import CephFSTestCase
7 from teuthology.orchestra.run import Raw
8 from teuthology.exceptions import CommandFailedError
9
10 log = logging.getLogger(__name__)
11
12 MDS_RESTART_GRACE = 60
13
14 class TestSnapshots(CephFSTestCase):
15 MDSS_REQUIRED = 3
16 LOAD_SETTINGS = ["mds_max_snaps_per_dir"]
17
18 def _check_subtree(self, rank, path, status=None):
19 got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status)
20 for s in got_subtrees:
21 if s['dir']['path'] == path and s['auth_first'] == rank:
22 return True
23 return False
24
    def _get_snapclient_dump(self, rank=0, status=None):
        # Dump the snapclient-side cache of the MDS at `rank` via its admin socket.
        return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status)
27
    def _get_snapserver_dump(self, rank=0, status=None):
        # Dump the snaptable server state of the MDS at `rank` (--server flag
        # selects the authoritative table rather than the client cache).
        return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status)
30
    def _get_last_created_snap(self, rank=0, status=None):
        # Snapid of the most recently created snapshot, per the snaptable server.
        return int(self._get_snapserver_dump(rank,status=status)["last_created"])
33
    def _get_last_destroyed_snap(self, rank=0, status=None):
        # Snapid of the most recently destroyed snapshot, per the snaptable server.
        return int(self._get_snapserver_dump(rank,status=status)["last_destroyed"])
36
    def _get_pending_snap_update(self, rank=0, status=None):
        # List of snaptable updates that are prepared but not yet committed.
        return self._get_snapserver_dump(rank,status=status)["pending_update"]
39
    def _get_pending_snap_destroy(self, rank=0, status=None):
        # List of snapshot destroys that are prepared but not yet committed.
        return self._get_snapserver_dump(rank,status=status)["pending_destroy"]
42
43 def test_allow_new_snaps_config(self):
44 """
45 Check whether 'allow_new_snaps' setting works
46 """
47 self.mount_a.run_shell(["mkdir", "test-allow-snaps"])
48
49 self.fs.set_allow_new_snaps(False);
50 try:
51 self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
52 except CommandFailedError as ce:
53 self.assertEqual(ce.exitstatus, errno.EPERM, "expected EPERM")
54 else:
55 self.fail("expected snap creatiion to fail")
56
57 self.fs.set_allow_new_snaps(True);
58 self.mount_a.run_shell(["mkdir", "test-allow-snaps/.snap/snap00"])
59 self.mount_a.run_shell(["rmdir", "test-allow-snaps/.snap/snap00"])
60 self.mount_a.run_shell(["rmdir", "test-allow-snaps"])
61
    def test_kill_mdstable(self):
        """
        check snaptable transaction

        Kill an MDS at each mds_kill_mdstable_at failure-injection point and
        verify the snaptable recovers: pending updates are either committed
        or rolled back once the daemons restart.  Covers server-side kills
        (rank 0), combined server + client kills, client-side kills (rank 1),
        and a client kill combined with a server kill during rollback.
        """
        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Require FUSE client to forcibly kill mount")

        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        # setup subtrees: pin /d1 to rank 1 so rank 1 acts as snapclient
        self.mount_a.run_shell(["mkdir", "-p", "d1/dir"])
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d1", 1)], rank=1, path="/d1")

        last_created = self._get_last_created_snap(rank=0,status=status)

        # mds_kill_mdstable_at:
        # 1: MDSTableServer::handle_prepare
        # 2: MDSTableServer::_prepare_logged
        # 5: MDSTableServer::handle_commit
        # 6: MDSTableServer::_commit_logged
        for i in [1,2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}".format(i))

            status = self.fs.status()
            rank0 = self.fs.get_rank(rank=0, status=status)
            # freeze the rank so the monitors don't fail it over while killed
            self.fs.rank_freeze(True, rank=0)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=self.fs.beacon_timeout);
            self.delete_mds_coredump(rank0['name']);

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])
            status = self.fs.wait_for_daemons()

            # the mkdir must complete once the snaptable recovers
            proc.wait()
            last_created += 1
            self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30)

        self.set_conf("mds", "mds_reconnect_timeout", "5")

        self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # set mds_kill_mdstable_at, also kill snapclient
        for i in [2,5,6]:
            log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i))
            status = self.fs.status()
            last_created = self._get_last_created_snap(rank=0, status=status)

            rank0 = self.fs.get_rank(rank=0, status=status)
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=0) # prevent failover...
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=self.fs.beacon_timeout);
            self.delete_mds_coredump(rank0['name']);

            # kill the snapclient too, mid-transaction
            self.fs.rank_signal(signal.SIGKILL, rank=1)

            # the client never sees the mkdir complete; discard the mount
            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.fs.rank_fail(rank=0)
            self.fs.mds_restart(rank0['name'])
            self.wait_for_daemon_start([rank0['name']])

            self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE)
            # kills before _commit_logged leave the update pending;
            # a kill at _commit_logged (6) means the commit already landed
            if i in [2,5]:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 6:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

            if i in [2,5]:
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 2:
                    # killed before the prepare was logged: rolled back
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    # killed at handle_commit: replayed to completion
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 4: MDSTableClient::commit
        # 7: MDSTableClient::handle_request (got ack)
        for i in [3,4,7]:
            log.info("testing snapclient mds_kill_mdstable_at={0}".format(i))
            last_created = self._get_last_created_snap(rank=0)

            status = self.fs.status()
            rank1 = self.fs.get_rank(rank=1, status=status)
            self.fs.rank_freeze(True, rank=1) # prevent failover...
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status)
            proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=self.fs.beacon_timeout);
            self.delete_mds_coredump(rank1['name']);

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            # before the ack (3, 4) the update is still pending; after the
            # ack (7) the commit has completed
            if i in [3,4]:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)
            elif i == 7:
                self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0)
                self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.fs.rank_fail(rank=1)
            self.fs.mds_restart(rank1['name'])
            self.wait_for_daemon_start([rank1['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            if i in [3,4]:
                self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
                if i == 3:
                    # killed on agree, before commit: rolled back
                    self.assertEqual(self._get_last_created_snap(rank=0), last_created)
                else:
                    # killed at commit: replayed to completion
                    self.assertGreater(self._get_last_created_snap(rank=0), last_created)

            self.mount_a.mount_wait()

            self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])

        # mds_kill_mdstable_at:
        # 3: MDSTableClient::handle_request (got agree)
        # 8: MDSTableServer::handle_rollback
        log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8")
        last_created = self._get_last_created_snap(rank=0)

        status = self.fs.status()
        rank0 = self.fs.get_rank(rank=0, status=status)
        rank1 = self.fs.get_rank(rank=1, status=status)
        self.fs.rank_freeze(True, rank=0)
        self.fs.rank_freeze(True, rank=1)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
        # proc is intentionally never waited on: the client is killed below
        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=self.fs.beacon_timeout);
        self.delete_mds_coredump(rank1['name']);

        self.mount_a.kill()
        self.mount_a.kill_cleanup()

        self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

        self.fs.rank_fail(rank=1)
        self.fs.mds_restart(rank1['name'])
        self.wait_for_daemon_start([rank1['name']])

        # rollback triggers assertion
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=self.fs.beacon_timeout);
        self.delete_mds_coredump(rank0['name']);
        self.fs.rank_fail(rank=0)
        self.fs.mds_restart(rank0['name'])
        self.wait_for_daemon_start([rank0['name']])
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)

        # mds.1 should re-send rollback message
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertEqual(self._get_last_created_snap(rank=0), last_created)

        self.mount_a.mount_wait()
236
    def test_snapclient_cache(self):
        """
        check if snapclient cache gets synced properly

        Verifies that every rank's snapclient cache matches the snaptable
        server after mksnap/rmsnap, across MDS recovery, and when an MDS
        is killed at MDSTableClient failure-injection points while other
        ranks are also failed.
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(3)
        status = self.fs.wait_for_daemons()

        # pin three subtrees, one per rank, so all three ranks participate
        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"])
        self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1), ("/d0/d2", 2)], rank="all", status=status, path="/d0")

        def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
            # compare a rank's snapclient cache against the snapserver dump;
            # every cached entry must match the server's value
            if cache_dump is None:
                cache_dump = self._get_snapclient_dump(rank=rank)
            for key, value in cache_dump.items():
                if value != snaps_dump[key]:
                    return False
            return True;

        # sync after mksnap
        last_created = self._get_last_created_snap(rank=0)
        self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"])
        self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_created_snap(rank=0), last_created)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync after rmsnap
        last_destroyed = self._get_last_destroyed_snap(rank=0)
        self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"])
        self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30)
        self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed)

        snaps_dump = self._get_snapserver_dump(rank=0)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # sync during mds recovers
        self.fs.rank_fail(rank=2)
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        self.fs.rank_fail(rank=0)
        self.fs.rank_fail(rank=1)
        status = self.fs.wait_for_daemons()
        self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE)
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1));
        self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2));

        # kill at MDSTableClient::handle_notify_prep
        status = self.fs.status()
        rank2 = self.fs.get_rank(rank=2, status=status)
        self.fs.rank_freeze(True, rank=2)
        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status)
        proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False)
        self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=self.fs.beacon_timeout);
        self.delete_mds_coredump(rank2['name']);

        # mksnap should wait for notify ack from mds.2
        self.assertFalse(proc.finished);

        # mksnap should proceed after mds.2 fails
        self.fs.rank_fail(rank=2)
        self.wait_until_true(lambda: proc.finished, timeout=30);

        self.fs.mds_restart(rank2['name'])
        self.wait_for_daemon_start([rank2['name']])
        status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

        self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")])

        # kill at MDSTableClient::commit
        # the recovering mds should sync all mds' cache when it enters resolve stage
        self.set_conf("mds", "mds_reconnect_timeout", "5")
        for i in range(1, 4):
            status = self.fs.status()
            rank2 = self.fs.get_rank(rank=2, status=status)
            self.fs.rank_freeze(True, rank=2)
            self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status)
            last_created = self._get_last_created_snap(rank=0)
            proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False)
            self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=self.fs.beacon_timeout);
            self.delete_mds_coredump(rank2['name']);

            self.mount_a.kill()
            self.mount_a.kill_cleanup()

            self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1)

            # additionally fail other ranks on later iterations
            # NOTE(review): i never reaches 4 here since range(1, 4) stops at
            # 3, so the `4` members of these lists look unreachable — confirm
            if i in [2,4]:
                self.fs.rank_fail(rank=0)
            if i in [3,4]:
                self.fs.rank_fail(rank=1)

            self.fs.rank_fail(rank=2)
            self.fs.mds_restart(rank2['name'])
            self.wait_for_daemon_start([rank2['name']])
            status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE)

            rank0_cache = self._get_snapclient_dump(rank=0)
            rank1_cache = self._get_snapclient_dump(rank=1)
            rank2_cache = self._get_snapclient_dump(rank=2)

            # all ranks must agree after recovery
            self.assertGreater(int(rank0_cache["last_created"]), last_created)
            self.assertEqual(rank0_cache, rank1_cache);
            self.assertEqual(rank0_cache, rank2_cache);

            self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)

            snaps_dump = self._get_snapserver_dump(rank=0)
            self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
            self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));

            self.mount_a.mount_wait()

        self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
362
    def test_multimds_mksnap(self):
        """
        check if snapshot takes effect across authority subtrees

        A snapshot taken on /d0 (authority: rank 0) must also cover the
        nested subtree /d0/d1 which is pinned to rank 1.
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell(["mkdir", "-p", "d0/d1/empty"])
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d0/d1", 1)], rank="all", status=status, path="/d0")

        self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024)
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"])
        # the deleted file must remain readable through the snapshot
        self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024)

        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0"])
383
    def test_multimds_past_parents(self):
        """
        check if past parents are properly recorded during across authority rename
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")
        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

        self.mount_a.run_shell(["mkdir", "d0/d3"])
        self.mount_a.run_shell(["mkdir", "d0/.snap/s1"])
        snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue()

        # move d3 into the subtree owned by the other rank; the snapshot
        # taken under d0 must still be visible via d3's past parent
        self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, snap_name);

        # removing the snapshot under d0 must remove it from the renamed
        # directory's .snap listing as well
        self.mount_a.run_shell(["rmdir", "d0/.snap/s1"])
        snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue()
        self.assertEqual(snap_name1, "");

        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
410
    def test_multimds_hardlink(self):
        """
        check if hardlink snapshot works in multimds setup
        """
        self.fs.set_allow_new_snaps(True);
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.run_shell_payload("mkdir -p {d0,d1}/empty")

        self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
        self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
        self._wait_subtrees([("/d0", 0), ("/d1", 1)], rank=0, status=status)

        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("asdf")
            open(os.path.join("{path}", "d0/file2"), 'w').write("asdf")
            """.format(path=self.mount_a.mountpoint)
        ))

        # hard-link across the two authority subtrees
        self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"])
        self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"])

        self.mount_a.run_shell(["mkdir", "d1/.snap/s1"])

        # overwrite one link after the snapshot is taken
        self.mount_a.run_python(dedent("""
            import os
            open(os.path.join("{path}", "d0/file1"), 'w').write("qwer")
            """.format(path=self.mount_a.mountpoint)
        ))

        # the snapshot must preserve the original content despite the overwrite
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"])

        # ... and despite unlinking either link of the other file
        self.mount_a.run_shell(["rm", "-f", "d0/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        self.mount_a.run_shell(["rm", "-f", "d1/file2"])
        self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"])

        self.mount_a.run_shell(["rmdir", "d1/.snap/s1"])
        self.mount_a.run_shell(["rm", "-rf", "d0", "d1"])
453
454 class SnapLimitViolationException(Exception):
455 failed_snapshot_number = -1
456
457 def __init__(self, num):
458 self.failed_snapshot_number = num
459
460 def get_snap_name(self, dir_name, sno):
461 sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno)
462 return sname
463
    def create_snap_dir(self, sname):
        # mkdir under a .snap directory is how CephFS snapshots are taken;
        # raises CommandFailedError if the MDS refuses (e.g. limit reached)
        self.mount_a.run_shell(["mkdir", sname])
466
467 def delete_dir_and_snaps(self, dir_name, snaps):
468 for sno in range(1, snaps+1, 1):
469 sname = self.get_snap_name(dir_name, sno)
470 self.mount_a.run_shell(["rmdir", sname])
471 self.mount_a.run_shell(["rmdir", dir_name])
472
473 def create_dir_and_snaps(self, dir_name, snaps):
474 self.mount_a.run_shell(["mkdir", dir_name])
475
476 for sno in range(1, snaps+1, 1):
477 sname = self.get_snap_name(dir_name, sno)
478 try:
479 self.create_snap_dir(sname)
480 except CommandFailedError as e:
481 # failing at the last mkdir beyond the limit is expected
482 if sno == snaps:
483 log.info("failed while creating snap #{}: {}".format(sno, repr(e)))
484 raise TestSnapshots.SnapLimitViolationException(sno)
485
    def test_mds_max_snaps_per_dir_default_limit(self):
        """
        Test the newly introduced option named mds_max_snaps_per_dir
        Default snaps limit is 100
        Test if the default number of snapshot directories can be created
        """
        # mds_max_snaps_per_dir is populated on self via LOAD_SETTINGS
        self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
        self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir))
494
495 def test_mds_max_snaps_per_dir_with_increased_limit(self):
496 """
497 Test the newly introudced option named mds_max_snaps_per_dir
498 First create 101 directories and ensure that the 101st directory
499 creation fails. Then increase the default by one and see if the
500 additional directory creation succeeds
501 """
502 # first test the default limit
503 new_limit = int(self.mds_max_snaps_per_dir)
504 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
505 try:
506 self.create_dir_and_snaps("accounts", new_limit + 1)
507 except TestSnapshots.SnapLimitViolationException as e:
508 if e.failed_snapshot_number == (new_limit + 1):
509 pass
510 # then increase the limit by one and test
511 new_limit = new_limit + 1
512 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
513 sname = self.get_snap_name("accounts", new_limit)
514 self.create_snap_dir(sname)
515 self.delete_dir_and_snaps("accounts", new_limit)
516
517 def test_mds_max_snaps_per_dir_with_reduced_limit(self):
518 """
519 Test the newly introudced option named mds_max_snaps_per_dir
520 First create 99 directories. Then reduce the limit to 98. Then try
521 creating another directory and ensure that additional directory
522 creation fails.
523 """
524 # first test the new limit
525 new_limit = int(self.mds_max_snaps_per_dir) - 1
526 self.create_dir_and_snaps("accounts", new_limit)
527 sname = self.get_snap_name("accounts", new_limit + 1)
528 # then reduce the limit by one and test
529 new_limit = new_limit - 1
530 self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
531 try:
532 self.create_snap_dir(sname)
533 except CommandFailedError:
534 # after reducing limit we expect the new snapshot creation to fail
535 pass
536 self.delete_dir_and_snaps("accounts", new_limit + 1)