import logging
from textwrap import dedent
from teuthology.orchestra.run import CommandFailedError
+from tasks.ceph_test_case import TestTimeoutError
from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
from tasks.cephfs.fuse_mount import FuseMount
import os
# ask the client to trim the caps.
cache_memory_limit = "1K"
- self.set_conf('mds', 'mds_cache_memory_limit', cache_memory_limit)
- self.set_conf('mds', 'mds_recall_max_caps', open_files/2)
- self.set_conf('mds', 'mds_recall_warning_threshold', open_files)
- self.fs.mds_fail_restart()
- self.fs.wait_for_daemons()
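+ # config_set applies these values at runtime, so no MDS fail/restart
+ # is needed for them to take effect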
+ self.config_set('mds', 'mds_cache_memory_limit', cache_memory_limit)
+ self.config_set('mds', 'mds_recall_max_caps', int(open_files/2))
+ self.config_set('mds', 'mds_recall_warning_threshold', open_files)
- mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
- mds_max_caps_per_client = int(self.fs.get_config("mds_max_caps_per_client"))
- mds_recall_warning_decay_rate = float(self.fs.get_config("mds_recall_warning_decay_rate"))
- self.assertTrue(open_files >= mds_min_caps_per_client)
+ mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
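+ # pin the working set at the minimum so it does not mask the cache
+ # pressure warnings this test expects to see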
+ self.config_set('mds', 'mds_min_caps_working_set', mds_min_caps_per_client)
+ mds_max_caps_per_client = int(self.config_get('mds', "mds_max_caps_per_client"))
+ mds_recall_warning_decay_rate = float(self.config_get('mds', "mds_recall_warning_decay_rate"))
+ self.assertGreaterEqual(open_files, mds_min_caps_per_client)
mount_a_client_id = self.mount_a.get_global_id()
- path = "subdir/mount_a" if use_subdir else "mount_a"
+ path = "subdir" if use_subdir else "."
open_proc = self.mount_a.open_n_background(path, open_files)
# Client should now hold:
def test_client_pin_mincaps(self):
self._test_client_pin(True, 200)
+ def test_client_min_caps_working_set(self):
+ """
+ When a client has inodes pinned in its cache (open files), verify that
+ the MDS does not warn about the client failing to respond to cache
+ pressure while its cap count is below mds_min_caps_working_set.
+ """
+
+ # Set the MDS cache memory limit to a low value that will make the MDS
+ # ask the client to trim its caps.
+ cache_memory_limit = "1K"
+ open_files = 400
+
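+ # Tune recall so the MDS wants caps back, but put
+ # mds_min_caps_working_set at twice the number of open files so this
+ # client's cap count always stays below it.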
+ self.config_set('mds', 'mds_cache_memory_limit', cache_memory_limit)
+ self.config_set('mds', 'mds_recall_max_caps', int(open_files/2))
+ self.config_set('mds', 'mds_recall_warning_threshold', open_files)
+ self.config_set('mds', 'mds_min_caps_working_set', open_files*2)
+
+ mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
+ mds_recall_warning_decay_rate = float(self.config_get('mds', "mds_recall_warning_decay_rate"))
+ self.assertGreaterEqual(open_files, mds_min_caps_per_client)
+
+ mount_a_client_id = self.mount_a.get_global_id()
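+ # keep the files open in a background process so the client's caps
+ # stay pinned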
+ self.mount_a.open_n_background("subdir", open_files)
+
+ # Client should now hold:
+ # `open_files` caps for the open files
+ # 1 cap for root
+ # 1 cap for subdir
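+ # (reject_fn makes wait_until_equal fail early if the count overshoots)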
+ self.wait_until_equal(lambda: self.get_session(mount_a_client_id)['num_caps'],
+ open_files + 2,
+ timeout=600,
+ reject_fn=lambda x: x > open_files + 2)
+
+ # We can also test that the MDS health warning for oversized
+ # cache is functioning as intended.
+ self.wait_for_health("MDS_CACHE_OVERSIZED", mds_recall_warning_decay_rate*2)
+
+ try:
+ # The MDS is still over its cache limit, but it should not send
+ # MDS_CLIENT_RECALL warnings because the client's cap count is below
+ # mds_min_caps_working_set.
+ self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_warning_decay_rate*2)
+ except TestTimeoutError:
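+ # timing out here is the expected outcome: the warning never appeared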
+ pass
+ else:
+ raise RuntimeError("expected no client recall warning")
+
def test_client_release_bug(self):
"""
When a client has a bug (which we will simulate) preventing it from
releasing caps, verify that the MDS does not let the client sit above
mds_max_caps_per_client caps.
"""
- mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
+ mds_min_caps_per_client = int(self.config_get('mds', "mds_min_caps_per_client"))
mds_max_caps_per_client = 2*mds_min_caps_per_client
- self.set_conf('mds', 'mds_max_caps_per_client', mds_max_caps_per_client)
- self.fs.mds_fail_restart()
- self.fs.wait_for_daemons()
+ self.config_set('mds', 'mds_max_caps_per_client', mds_max_caps_per_client)
self.mount_a.create_n_files("foo/", 3*mds_max_caps_per_client, sync=True)