# ceph/qa/tasks/cephfs/test_nfs.py
# NOTE: these tests are not yet compatible with vstart_runner.py.
import errno
import json
import time
import logging
from io import BytesIO

from tasks.mgr.mgr_test_case import MgrTestCase
from teuthology import contextutil
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)

NFS_POOL_NAME = '.nfs'  # should match mgr_module.py

# TODO Add test for cluster update when ganesha can be deployed on multiple ports.
class TestNFS(MgrTestCase):
    def _cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def _nfs_cmd(self, *args):
        return self._cmd("nfs", *args)

    def _orch_cmd(self, *args):
        return self._cmd("orch", *args)

    def _sys_cmd(self, cmd):
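        # Run the given command on the test nodes without raising on a
        # non-zero exit status and return the captured stdout bytes, if any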
        ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO())
        stdout = ret[0].stdout
        if stdout:
            return stdout.getvalue()

    def setUp(self):
        super(TestNFS, self).setUp()
        self._load_module('nfs')
        self.cluster_id = "test"
        self.export_type = "cephfs"
        self.pseudo_path = "/cephfs"
        self.path = "/"
        self.fs_name = "nfs-cephfs"
        self.expected_name = "nfs.test"
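        # Expected JSON for the default export; used to verify the output of
        # 'nfs export info' and 'nfs export ls --detailed'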
        self.sample_export = {
            "export_id": 1,
            "path": self.path,
            "cluster_id": self.cluster_id,
            "pseudo": self.pseudo_path,
            "access_type": "RW",
            "squash": "none",
            "security_label": True,
            "protocols": [
                4
            ],
            "transports": [
                "TCP"
            ],
            "fsal": {
                "name": "CEPH",
                "user_id": "nfs.test.1",
                "fs_name": self.fs_name,
            },
            "clients": []
        }

    def _check_nfs_server_status(self):
        res = self._sys_cmd(['sudo', 'systemctl', 'status', 'nfs-server'])
        if isinstance(res, bytes) and b'Active: active' in res:
            self._disable_nfs()

    def _disable_nfs(self):
        log.info("Disabling NFS")
        self._sys_cmd(['sudo', 'systemctl', 'disable', 'nfs-server', '--now'])

    def _fetch_nfs_daemons_details(self, enable_json=False):
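        # Return 'orch ps' output for this cluster's nfs service, optionally as JSON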
        args = ('ps', f'--service_name={self.expected_name}')
        if enable_json:
            args = (*args, '--format=json')
        return self._orch_cmd(*args)

    def _check_nfs_cluster_event(self, expected_event):
        '''
        Check whether an event occurred during the lifetime of the NFS service
        :param expected_event: event that was expected to occur
        '''
        event_occurred = False
        # Wait a few seconds for the NFS daemons' status to be updated
        with contextutil.safe_while(sleep=10, tries=18, _raise=False) as proceed:
            while not event_occurred and proceed():
                daemons_details = json.loads(
                    self._fetch_nfs_daemons_details(enable_json=True))
                log.info('daemons details %s', daemons_details)
                # The 'events' key may not exist in the daemon description
                # after a mgr failover and can take some time to appear
                # (it is populated on the first daemon event)
                if 'events' not in daemons_details[0]:
                    continue
                for event in daemons_details[0]['events']:
                    log.info('daemon event %s', event)
                    if expected_event in event:
                        event_occurred = True
                        break
        return event_occurred

    def _check_nfs_cluster_status(self, expected_status, fail_msg):
        '''
        Check the current status of the NFS service
        :param expected_status: status to be verified
        :param fail_msg: message to be printed if the test fails
        '''
        # Wait (in increasing intervals) as the ganesha daemon takes some time
        # to be deleted/created
        wait_time = 10
        while wait_time <= 120:
            time.sleep(wait_time)
            if expected_status in self._fetch_nfs_daemons_details():
                return
            wait_time += 10
        self.fail(fail_msg)

    def _check_auth_ls(self, export_id=1, check_in=False):
        '''
        Test export user id creation or deletion.
        :param export_id: Denotes export number
        :param check_in: Check for the specified export id
        '''
        output = self._cmd('auth', 'ls')
        client_id = f'client.nfs.{self.cluster_id}'
        if check_in:
            self.assertIn(f'{client_id}.{export_id}', output)
        else:
            self.assertNotIn(f'{client_id}.{export_id}', output)

    def _test_idempotency(self, cmd_func, cmd_args):
        '''
        Test idempotency of commands. It first runs the TestNFS test method
        for a command and then checks the result of running the command again.
        The TestNFS test method has the required checks to verify that the
        command works.
        :param cmd_func: TestNFS method
        :param cmd_args: nfs command arguments to be run
        '''
        cmd_func()
        ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
        if ret != 0:
            self.fail("Idempotency test failed")

    def _test_create_cluster(self):
        '''
        Test single nfs cluster deployment.
        '''
        # Disable any running nfs ganesha daemon
        self._check_nfs_server_status()
        self._nfs_cmd('cluster', 'create', self.cluster_id)
        # Check for expected status and daemon name (nfs.<cluster_id>)
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster deployment failed')

    def _test_delete_cluster(self):
        '''
        Test deletion of a single nfs cluster.
        '''
        self._nfs_cmd('cluster', 'rm', self.cluster_id)
        self._check_nfs_cluster_status('No daemons reported',
                                       'NFS Ganesha cluster could not be deleted')

    def _test_list_cluster(self, empty=False):
        '''
        Test listing of deployed nfs clusters. If an nfs cluster is deployed,
        check for the expected cluster id; otherwise check that nothing is listed.
        :param empty: If true, it denotes no cluster is deployed
        '''
        if empty:
            cluster_id = ''
        else:
            cluster_id = self.cluster_id
        nfs_output = self._nfs_cmd('cluster', 'ls')
        self.assertEqual(cluster_id, nfs_output.strip())

    def _create_export(self, export_id, create_fs=False, extra_cmd=None):
        '''
        Test creation of a single export.
        :param export_id: Denotes export number
        :param create_fs: If false, the filesystem is assumed to exist; otherwise create it
        :param extra_cmd: List of extra arguments for creating export
        '''
        if create_fs:
            self._cmd('fs', 'volume', 'create', self.fs_name)
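            # Wait until the orchestrator reports a running MDS daemon for the
            # newly created filesystem before creating the export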
            with contextutil.safe_while(sleep=5, tries=30) as proceed:
                while proceed():
                    output = self._cmd(
                        'orch', 'ls', '-f', 'json',
                        '--service-name', f'mds.{self.fs_name}'
                    )
                    j = json.loads(output)
                    if j[0]['status']['running']:
                        break
        export_cmd = ['nfs', 'export', 'create', 'cephfs',
                      '--fsname', self.fs_name, '--cluster-id', self.cluster_id]
        if isinstance(extra_cmd, list):
            export_cmd.extend(extra_cmd)
        else:
            export_cmd.extend(['--pseudo-path', self.pseudo_path])
        # Runs the nfs export create command
        self._cmd(*export_cmd)
        # Check if user id for export is created
        self._check_auth_ls(export_id, check_in=True)
        res = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'get',
                             f'export-{export_id}', '-'])
        # Check if export object is created
        if res == b'':
            self.fail("Export cannot be created")

    def _create_default_export(self):
        '''
        Deploy a single nfs cluster and create export with default options.
        '''
        self._test_create_cluster()
        self._create_export(export_id='1', create_fs=True)

    def _delete_export(self):
        '''
        Delete an export.
        '''
        self._nfs_cmd('export', 'rm', self.cluster_id, self.pseudo_path)
        self._check_auth_ls()

    def _test_list_export(self):
        '''
        Test listing of created exports.
        '''
        nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id))
        self.assertIn(self.pseudo_path, nfs_output)

    def _test_list_detailed(self, sub_vol_path):
        '''
        Test listing of created exports with the detailed option.
        :param sub_vol_path: Denotes path of subvolume
        '''
        nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id, '--detailed'))
        # Export-1 with default values (access type = rw and path = '/')
        self.assertDictEqual(self.sample_export, nfs_output[0])
        # Export-2 with read only access
        self.sample_export['export_id'] = 2
        self.sample_export['pseudo'] = self.pseudo_path + '1'
        self.sample_export['access_type'] = 'RO'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.2'
        self.assertDictEqual(self.sample_export, nfs_output[1])
        # Export-3 for subvolume with read only access
        self.sample_export['export_id'] = 3
        self.sample_export['path'] = sub_vol_path
        self.sample_export['pseudo'] = self.pseudo_path + '2'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.3'
        self.assertDictEqual(self.sample_export, nfs_output[2])
        # Export-4 for subvolume
        self.sample_export['export_id'] = 4
        self.sample_export['pseudo'] = self.pseudo_path + '3'
        self.sample_export['access_type'] = 'RW'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.4'
        self.assertDictEqual(self.sample_export, nfs_output[3])

    def _get_export(self):
        '''
        Returns export block in json format
        '''
        return json.loads(self._nfs_cmd('export', 'info', self.cluster_id, self.pseudo_path))

    def _test_get_export(self):
        '''
        Test fetching of created export.
        '''
        nfs_output = self._get_export()
        self.assertDictEqual(self.sample_export, nfs_output)

    def _check_export_obj_deleted(self, conf_obj=False):
        '''
        Test if export or config objects are deleted successfully.
        :param conf_obj: It denotes the config object needs to be checked
        '''
        rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls'])

        if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls):
            self.fail("Delete export failed")

    def _get_port_ip_info(self):
        '''
        Return port and ip for a cluster
        '''
        # {'test': {'backend': [{'hostname': 'smithi068', 'ip': '172.21.15.68', 'port': 2049}]}}
        info_output = json.loads(self._nfs_cmd('cluster', 'info', self.cluster_id))['test']['backend'][0]
        return info_output["port"], info_output["ip"]

    def _test_mnt(self, pseudo_path, port, ip, check=True):
        '''
        Test mounting of created exports
        :param pseudo_path: It is the pseudo root name
        :param port: Port of deployed nfs cluster
        :param ip: IP of deployed nfs cluster
        :param check: It denotes if i/o testing needs to be done
        '''
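        # The export may not be mountable immediately after ganesha is
        # (re)deployed, so retry the mount a few times before giving up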
        tries = 3
        while True:
            try:
                self.ctx.cluster.run(
                    args=['sudo', 'mount', '-t', 'nfs', '-o', f'port={port}',
                          f'{ip}:{pseudo_path}', '/mnt'])
                break
            except CommandFailedError as e:
                if tries:
                    tries -= 1
                    time.sleep(2)
                    continue
                # A failed mount (mount(8) exits with status 32) is only
                # acceptable when a non-existing pseudo path is passed
                if not check and e.exitstatus == 32:
                    return
                raise

        self.ctx.cluster.run(args=['sudo', 'chmod', '1777', '/mnt'])

        try:
            self.ctx.cluster.run(args=['touch', '/mnt/test'])
            out_mnt = self._sys_cmd(['ls', '/mnt'])
            self.assertEqual(out_mnt, b'test\n')
        finally:
            self.ctx.cluster.run(args=['sudo', 'umount', '/mnt'])

    def _write_to_read_only_export(self, pseudo_path, port, ip):
        '''
        Check that a write to a read-only export fails
        '''
        try:
            self._test_mnt(pseudo_path, port, ip)
        except CommandFailedError as e:
            # Write to cephfs export should fail for test to pass
            self.assertEqual(
                e.exitstatus, errno.EPERM,
                'invalid error code on trying to write to read-only export')
        else:
            self.fail('expected write to a read-only export to fail')

    def test_create_and_delete_cluster(self):
        '''
        Test successful creation and deletion of the nfs cluster.
        '''
        self._test_create_cluster()
        self._test_list_cluster()
        self._test_delete_cluster()
        # List clusters again to ensure no cluster is shown
        self._test_list_cluster(empty=True)

    def test_create_delete_cluster_idempotency(self):
        '''
        Test idempotency of cluster create and delete commands.
        '''
        self._test_idempotency(self._test_create_cluster, ['nfs', 'cluster', 'create', self.cluster_id])
        self._test_idempotency(self._test_delete_cluster, ['nfs', 'cluster', 'rm', self.cluster_id])

    def test_create_cluster_with_invalid_cluster_id(self):
        '''
        Test nfs cluster deployment failure with invalid cluster id.
        '''
        try:
            invalid_cluster_id = '/cluster_test'  # Only [A-Za-z0-9-_.] chars are valid
            self._nfs_cmd('cluster', 'create', invalid_cluster_id)
            self.fail(f"Cluster successfully created with invalid cluster id {invalid_cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.EINVAL:
                raise

    def test_create_and_delete_export(self):
        '''
        Test successful creation and deletion of the cephfs export.
        '''
        self._create_default_export()
        self._test_get_export()
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        # Check if rados export object is deleted
        self._check_export_obj_deleted()
        self._test_mnt(self.pseudo_path, port, ip, False)
        self._test_delete_cluster()

    def test_create_delete_export_idempotency(self):
        '''
        Test idempotency of export create and delete commands.
        '''
        self._test_idempotency(self._create_default_export, [
            'nfs', 'export', 'create', 'cephfs',
            '--fsname', self.fs_name, '--cluster-id', self.cluster_id,
            '--pseudo-path', self.pseudo_path])
        self._test_idempotency(self._delete_export, ['nfs', 'export', 'rm', self.cluster_id,
                                                     self.pseudo_path])
        self._test_delete_cluster()

    def test_create_multiple_exports(self):
        '''
        Test creating multiple exports with different access types and paths.
        '''
        # Export-1 with default values (access type = rw and path = '/')
        self._create_default_export()
        # Export-2 with read only access
        self._create_export(export_id='2',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'1', '--readonly'])
        # Export-3 for subvolume with read only access
        self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol')
        fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol').strip()
        self._create_export(export_id='3',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'2', '--readonly',
                                       '--path', fs_path])
        # Export-4 for subvolume
        self._create_export(export_id='4',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'3',
                                       '--path', fs_path])
        # Check if exports get listed
        self._test_list_detailed(fs_path)
        self._test_delete_cluster()
        # Check if rados ganesha conf object is deleted
        self._check_export_obj_deleted(conf_obj=True)
        self._check_auth_ls()

    def test_exports_on_mgr_restart(self):
        '''
        Test export availability on restarting mgr.
        '''
        self._create_default_export()
        # Unloading and loading the module will restart the mgr
        self._unload_module("cephadm")
        self._load_module("cephadm")
        self._orch_cmd("set", "backend", "cephadm")
        # Check if ganesha daemon is running
        self._check_nfs_cluster_status('running', 'Failed to redeploy NFS Ganesha cluster')
        # Check if created export is listed
        self._test_list_export()
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        self._test_delete_cluster()

    def test_export_create_with_non_existing_fsname(self):
        '''
        Test creating export with non-existing filesystem.
        '''
        try:
            fs_name = 'nfs-test'
            self._test_create_cluster()
            self._nfs_cmd('export', 'create', 'cephfs',
                          '--fsname', fs_name, '--cluster-id', self.cluster_id,
                          '--pseudo-path', self.pseudo_path)
            self.fail(f"Export created with non-existing filesystem {fs_name}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise
        finally:
            self._test_delete_cluster()

    def test_export_create_with_non_existing_clusterid(self):
        '''
        Test creating cephfs export with non-existing nfs cluster.
        '''
        try:
            cluster_id = 'invalidtest'
            self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name,
                          '--cluster-id', cluster_id, '--pseudo-path', self.pseudo_path)
            self.fail(f"Export created with non-existing cluster id {cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise

    def test_export_create_with_relative_pseudo_path_and_root_directory(self):
        '''
        Test creating cephfs export with relative or '/' pseudo path.
        '''
        def check_pseudo_path(pseudo_path):
            try:
                self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name,
                              '--cluster-id', self.cluster_id,
                              '--pseudo-path', pseudo_path)
                self.fail(f"Export created for {pseudo_path}")
            except CommandFailedError as e:
                # Command should fail for test to pass
                if e.exitstatus != errno.EINVAL:
                    raise

        self._test_create_cluster()
        self._cmd('fs', 'volume', 'create', self.fs_name)
        check_pseudo_path('invalidpath')
        check_pseudo_path('/')
        check_pseudo_path('//')
        self._cmd('fs', 'volume', 'rm', self.fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()

    def test_write_to_read_only_export(self):
        '''
        Test write to readonly export.
        '''
        self._test_create_cluster()
        self._create_export(export_id='1', create_fs=True,
                            extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly'])
        port, ip = self._get_port_ip_info()
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed')
        self._write_to_read_only_export(self.pseudo_path, port, ip)
        self._test_delete_cluster()

    def test_cluster_info(self):
        '''
        Test that cluster info outputs the correct ip and hostname
        '''
        self._test_create_cluster()
        info_output = json.loads(self._nfs_cmd('cluster', 'info', self.cluster_id))
        print(f'info {info_output}')
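        # Pop the reported IP so the rest of the structure can be compared
        # exactly; the IP itself is checked against the host's addresses below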
        info_ip = info_output[self.cluster_id].get('backend', [])[0].pop("ip")
        host_details = {
            self.cluster_id: {
                'backend': [
                    {
                        "hostname": self._sys_cmd(['hostname']).decode("utf-8").strip(),
                        "port": 2049
                    }
                ],
                "virtual_ip": None,
            }
        }
        host_ip = self._sys_cmd(['hostname', '-I']).decode("utf-8").split()
        print(f'host_ip is {host_ip}, info_ip is {info_ip}')
        self.assertDictEqual(info_output, host_details)
        self.assertTrue(info_ip in host_ip)
        self._test_delete_cluster()

    def test_cluster_set_reset_user_config(self):
        '''
        Test that a cluster is created using the user config and reverts back
        to the default config on reset.
        '''
        self._test_create_cluster()

        pool = NFS_POOL_NAME
        user_id = 'test'
        fs_name = 'user_test_fs'
        pseudo_path = '/ceph'
        self._cmd('fs', 'volume', 'create', fs_name)
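        # Give the new filesystem (and its MDS) some time to become active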
        time.sleep(20)
        key = self._cmd('auth', 'get-or-create-key', f'client.{user_id}', 'mon',
                        'allow r', 'osd',
                        f'allow rw pool={pool} namespace={self.cluster_id}, allow rw tag cephfs data={fs_name}',
                        'mds', f'allow rw path={self.path}').strip()
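        # Raw ganesha config defining its own export (Export_Id 100) with the
        # manually created cephx user; applied below via 'nfs cluster config set'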
        config = f""" LOG {{
        Default_log_level = FULL_DEBUG;
        }}

        EXPORT {{
            Export_Id = 100;
            Transports = TCP;
            Path = /;
            Pseudo = {pseudo_path};
            Protocols = 4;
            Access_Type = RW;
            Attr_Expiration_Time = 0;
            Squash = None;
            FSAL {{
                Name = CEPH;
                Filesystem = {fs_name};
                User_Id = {user_id};
                Secret_Access_Key = '{key}';
            }}
        }}"""
        port, ip = self._get_port_ip_info()
        self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster', 'config',
                                   'set', self.cluster_id, '-i', '-'], stdin=config)
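        # Allow time for ganesha to pick up the user-supplied config, then
        # verify the stored config object and mount the export it defines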
        time.sleep(30)
        res = self._sys_cmd(['rados', '-p', pool, '-N', self.cluster_id, 'get',
                             f'userconf-nfs.{user_id}', '-'])
        self.assertEqual(config, res.decode('utf-8'))
        self._test_mnt(pseudo_path, port, ip)
        self._nfs_cmd('cluster', 'config', 'reset', self.cluster_id)
        rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls'])
        if b'conf-nfs' not in rados_obj_ls or b'userconf-nfs' in rados_obj_ls:
            self.fail("User config not deleted or common config object missing")
        time.sleep(30)
        self._test_mnt(pseudo_path, port, ip, False)
        self._cmd('fs', 'volume', 'rm', fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()

    def test_cluster_set_user_config_with_non_existing_clusterid(self):
        '''
        Test setting user config for non-existing nfs cluster.
        '''
        try:
            cluster_id = 'invalidtest'
            self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster',
                                       'config', 'set', cluster_id, '-i', '-'], stdin='testing')
            self.fail(f"User config set for non-existing cluster {cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise

    def test_cluster_reset_user_config_with_non_existing_clusterid(self):
        '''
        Test resetting user config for non-existing nfs cluster.
        '''
        try:
            cluster_id = 'invalidtest'
            self._nfs_cmd('cluster', 'config', 'reset', cluster_id)
            self.fail(f"User config reset for non-existing cluster {cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise

    def test_create_export_via_apply(self):
        '''
        Test creation of export via apply
        '''
        self._test_create_cluster()
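        # Create the export by applying a JSON export spec on stdin instead of
        # using 'nfs export create'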
        self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                   self.cluster_id, '-i', '-'],
                             stdin=json.dumps({
                                 "path": "/",
                                 "pseudo": "/cephfs",
                                 "squash": "none",
                                 "access_type": "rw",
                                 "protocols": [4],
                                 "fsal": {
                                     "name": "CEPH",
                                     "fs_name": self.fs_name
                                 }
                             }))
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        self._check_nfs_cluster_status(
            'running', 'NFS Ganesha cluster not running after new export was applied')
        self._test_delete_cluster()

    def test_update_export(self):
        '''
        Test update of export's pseudo path and access type from rw to ro
        '''
        self._create_default_export()
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        export_block = self._get_export()
        new_pseudo_path = '/testing'
        export_block['pseudo'] = new_pseudo_path
        export_block['access_type'] = 'RO'
        self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                   self.cluster_id, '-i', '-'],
                             stdin=json.dumps(export_block))
        if not self._check_nfs_cluster_event('restart'):
            self.fail("updating export's pseudo path should trigger restart of NFS service")
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster not running after restart')
        self._write_to_read_only_export(new_pseudo_path, port, ip)
        self._test_delete_cluster()

    def test_update_export_ro_to_rw(self):
        '''
        Test update of export's access level from ro to rw
        '''
        self._test_create_cluster()
        self._create_export(
            export_id='1', create_fs=True,
            extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly'])
        port, ip = self._get_port_ip_info()
        self._write_to_read_only_export(self.pseudo_path, port, ip)
        export_block = self._get_export()
        export_block['access_type'] = 'RW'
        self.ctx.cluster.run(
            args=['ceph', 'nfs', 'export', 'apply', self.cluster_id, '-i', '-'],
            stdin=json.dumps(export_block))
        if self._check_nfs_cluster_event('restart'):
            self.fail("update of export's access type should not trigger NFS service restart")
        self._test_mnt(self.pseudo_path, port, ip)
        self._test_delete_cluster()

    def test_update_export_with_invalid_values(self):
        '''
        Test update of export with invalid values
        '''
        self._create_default_export()
        export_block = self._get_export()

        def update_with_invalid_values(key, value, fsal=False):
            export_block_new = dict(export_block)
            if fsal:
                export_block_new['fsal'] = dict(export_block['fsal'])
                export_block_new['fsal'][key] = value
            else:
                export_block_new[key] = value
            try:
                self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                           self.cluster_id, '-i', '-'],
                                     stdin=json.dumps(export_block_new))
            except CommandFailedError:
                pass

        update_with_invalid_values('export_id', 9)
        update_with_invalid_values('cluster_id', 'testing_new')
        update_with_invalid_values('pseudo', 'test_relpath')
        update_with_invalid_values('access_type', 'W')
        update_with_invalid_values('squash', 'no_squash')
        update_with_invalid_values('security_label', 'invalid')
        update_with_invalid_values('protocols', [2])
        update_with_invalid_values('transports', ['UD'])
        update_with_invalid_values('name', 'RGW', True)
        update_with_invalid_values('user_id', 'testing_export', True)
        update_with_invalid_values('fs_name', 'b', True)
        self._test_delete_cluster()

    def test_cmds_without_reqd_args(self):
        '''
        Test that commands fail when required arguments are not passed
        '''
        def exec_cmd_invalid(*cmd):
            try:
                self._nfs_cmd(*cmd)
                self.fail(f"nfs {cmd} command executed successfully without required arguments")
            except CommandFailedError as e:
                # Command should fail for test to pass
                if e.exitstatus != errno.EINVAL:
                    raise

        exec_cmd_invalid('cluster', 'create')
        exec_cmd_invalid('cluster', 'delete')
        exec_cmd_invalid('cluster', 'config', 'set')
        exec_cmd_invalid('cluster', 'config', 'reset')
        exec_cmd_invalid('export', 'create', 'cephfs')
        exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid')
        exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid', 'a_fs')
        exec_cmd_invalid('export', 'ls')
        exec_cmd_invalid('export', 'delete')
        exec_cmd_invalid('export', 'delete', 'clusterid')
        exec_cmd_invalid('export', 'info')
        exec_cmd_invalid('export', 'info', 'clusterid')
        exec_cmd_invalid('export', 'apply')