]> git.proxmox.com Git - ceph.git/blobdiff - ceph/qa/tasks/cephfs/test_nfs.py
import ceph quincy 17.2.1
[ceph.git] / ceph / qa / tasks / cephfs / test_nfs.py
index 97269a32c55bb4667491930366f188ca875eb52c..47b3e63a6da984c1fbfde5d8fc3280ef5cf6c8b4 100644 (file)
@@ -70,20 +70,42 @@ class TestNFS(MgrTestCase):
         log.info("Disabling NFS")
         self._sys_cmd(['sudo', 'systemctl', 'disable', 'nfs-server', '--now'])
 
-    def _fetch_nfs_status(self):
-        return self._orch_cmd('ps', f'--service_name={self.expected_name}')
+    def _fetch_nfs_daemons_details(self, enable_json=False):
+        args = ('ps', f'--service_name={self.expected_name}')
+        if enable_json:
+            args = (*args, '--format=json')
+        return self._orch_cmd(*args)
+
+    def _check_nfs_cluster_event(self, expected_event):
+        '''
+        Check whether an event occurred during the lifetime of the NFS service
+        :param expected_event: event that was expected to occur
+        '''
+        event_occurred = False
+        # Wait a few seconds for NFS daemons' status to be updated
+        with contextutil.safe_while(sleep=10, tries=12, _raise=False) as proceed:
+            while not event_occurred and proceed():
+                daemons_details = json.loads(
+                    self._fetch_nfs_daemons_details(enable_json=True))
+                log.info('daemons details %s', daemons_details)
+                for event in daemons_details[0]['events']:
+                    log.info('daemon event %s', event)
+                    if expected_event in event:
+                        event_occurred = True
+                        break
+        return event_occurred
 
     def _check_nfs_cluster_status(self, expected_status, fail_msg):
         '''
-        Tests if nfs cluster created or deleted successfully
+        Check the current status of the NFS service
         :param expected_status: Status to be verified
         :param fail_msg: Message to be printed if test failed
         '''
-        # Wait for few seconds as ganesha daemon takes few seconds to be deleted/created
+        # Wait for up to two minutes as the ganesha daemon takes some time to be deleted/created
         wait_time = 10
-        while wait_time <= 60:
+        while wait_time <= 120:
             time.sleep(wait_time)
-            if expected_status in self._fetch_nfs_status():
+            if expected_status in self._fetch_nfs_daemons_details():
                 return
             wait_time += 10
         self.fail(fail_msg)
@@ -300,8 +322,11 @@ class TestNFS(MgrTestCase):
             self._test_mnt(pseudo_path, port, ip)
         except CommandFailedError as e:
             # Write to cephfs export should fail for test to pass
-            if e.exitstatus != errno.EPERM:
-                raise
+            self.assertEqual(
+                e.exitstatus, errno.EPERM,
+                'invalid error code on trying to write to read-only export')
+        else:
+            self.fail('expected write to a read-only export to fail')
 
     def test_create_and_delete_cluster(self):
         '''
@@ -596,12 +621,13 @@ class TestNFS(MgrTestCase):
                              }))
         port, ip = self._get_port_ip_info()
         self._test_mnt(self.pseudo_path, port, ip)
-        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed')
+        self._check_nfs_cluster_status(
+            'running', 'NFS Ganesha cluster not running after new export was applied')
         self._test_delete_cluster()
 
     def test_update_export(self):
         '''
-        Test update of exports
+        Test update of export's pseudo path and access type from rw to ro
         '''
         self._create_default_export()
         port, ip = self._get_port_ip_info()
@@ -613,10 +639,32 @@ class TestNFS(MgrTestCase):
         self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                    self.cluster_id, '-i', '-'],
                              stdin=json.dumps(export_block))
-        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed')
+        if not self._check_nfs_cluster_event('restart'):
+            self.fail("updating export's pseudo path should trigger restart of NFS service")
+        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster not running after restart')
         self._write_to_read_only_export(new_pseudo_path, port, ip)
         self._test_delete_cluster()
 
+    def test_update_export_ro_to_rw(self):
+        '''
+        Test update of export's access level from ro to rw
+        '''
+        self._test_create_cluster()
+        self._create_export(
+            export_id='1', create_fs=True,
+            extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly'])
+        port, ip = self._get_port_ip_info()
+        self._write_to_read_only_export(self.pseudo_path, port, ip)
+        export_block = self._get_export()
+        export_block['access_type'] = 'RW'
+        self.ctx.cluster.run(
+            args=['ceph', 'nfs', 'export', 'apply', self.cluster_id, '-i', '-'],
+            stdin=json.dumps(export_block))
+        if self._check_nfs_cluster_event('restart'):
+            self.fail("update of export's access type should not trigger NFS service restart")
+        self._test_mnt(self.pseudo_path, port, ip)
+        self._test_delete_cluster()
+
     def test_update_export_with_invalid_values(self):
         '''
         Test update of export with invalid values