import time
import signal
import logging
import operator
from random import randint
from six.moves import range

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)

class TestClusterAffinity(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

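    # Compare the expected per-daemon attributes (name, join_fscid, state, ...)
    # against what the current FSMap status reports for every daemon.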
    def _verify_join_fs(self, target, status=None):
        if status is None:
            status = self.fs.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

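    # Update the expected attributes for the named daemon in the target list.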
    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

    def _verify_init(self):
        status = self.fs.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status)
        return (status, target)

    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        self.wait_until_true(takeover, 30)

    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)

class TestClusterResize(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 3

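    # grow()/shrink() raise or lower max_mds, wait for the rank count to
    # settle, and return the resulting status for further checks.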
    def grow(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        _ = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n > oldmax)
        self.fs.set_max_mds(n)

        log.info("Waiting for cluster to grow.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(original_ranks.issubset(ranks) and len(ranks) == n)
        return status

    def shrink(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        _ = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n < oldmax)
        self.fs.set_max_mds(n)

        # Wait until the monitor finishes stopping ranks >= n
        log.info("Waiting for cluster to shrink.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(ranks.issubset(original_ranks) and len(ranks) == n)
        return status


    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

        self.grow(2)
        self.grow(3)


    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.grow(3)
        self.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.mount_a.umount_wait()

        self.grow(2)
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.grow(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.grow(2)

        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        self.fs.set_max_mds(3)
        # Don't wait for rank 1 to stop

        self.fs.set_max_mds(2)
        # Prevent another MDS from taking rank 1
        # XXX This is a little racy because rank 1 may have stopped and a
        # standby assigned to rank 1 before joinable=0 is set.
        self.fs.set_joinable(False) # XXX keep in mind changing max_mds clears this flag

        status = self.fs.status() # ensure `status` is bound for the log in the finally block
        try:
            status = self.fs.wait_for_daemons(timeout=90)
        except RuntimeError:
            # could not shrink to max_mds=2 and reach 2 actives (because joinable=False)
            status = self.fs.status()
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertTrue(ranks == set([0]))
        else:
            raise RuntimeError("should not be able to successfully shrink cluster!")
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)

class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Wait until the monitor promotes his replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(
            promoted,
            timeout=grace*2)

        # Start the original rank 0 daemon up again, see that he becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount usually, while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, grace * 2)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated when insufficient standbys are available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # restart the standby, see that he becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0) # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=grace * 2
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get a discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=grace * 2
        )

        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
        # by the discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)

class TestStandbyReplay(CephFSTestCase):
    MDSS_REQUIRED = 4

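    # Helpers: _confirm_no_replay() asserts that no standby-replay daemons
    # exist, _confirm_single_replay() asserts at most one per rank (and every
    # rank has one when full=True).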
    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    # Give the monitor a moment to assign a standby-replay
                    # daemon to this rank, then re-check with a fresh status.
                    retries = retries-1
                    time.sleep(2)
                    return self._confirm_single_replay(full=full, retries=retries)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

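    # After a rank is failed or restarted, verify that its standby-replay
    # follower (if any) is the daemon that took over the rank; otherwise the
    # replacement must have come from a standby or a freshly restarted daemon.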
    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons() # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1) # stop rank 1

        status = self._confirm_single_replay()
        self.assertEqual(standby_count, len(list(status.get_standbys())))


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

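    # Create two filesystems ("alpha" and "bravo"), restart the MDS daemons,
    # wait for both filesystems to become healthy, and grant the test clients
    # access to both data pools.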
    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount(mount_fs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount(mount_fs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)