]>
Commit | Line | Data |
---|---|---|
f67539c2 | 1 | # NOTE: these tests are not yet compatible with vstart_runner.py. |
f6b5b4d7 TL |
2 | import errno |
3 | import json | |
4 | import time | |
5 | import logging | |
1e59de90 | 6 | from io import BytesIO, StringIO |
f6b5b4d7 TL |
7 | |
8 | from tasks.mgr.mgr_test_case import MgrTestCase | |
a4b75251 | 9 | from teuthology import contextutil |
f6b5b4d7 TL |
10 | from teuthology.exceptions import CommandFailedError |
11 | ||
12 | log = logging.getLogger(__name__) | |
13 | ||
a4b75251 | 14 | NFS_POOL_NAME = '.nfs' # should match mgr_module.py |
f6b5b4d7 TL |
15 | |
16 | # TODO Add test for cluster update when ganesha can be deployed on multiple ports. | |
17 | class TestNFS(MgrTestCase): | |
    def _cmd(self, *args):
        # Run an arbitrary 'ceph ...' command via the mon manager and
        # return its stdout as a string.
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def _nfs_cmd(self, *args):
        # Convenience wrapper for 'ceph nfs ...' commands.
        return self._cmd("nfs", *args)

    def _nfs_complete_cmd(self, cmd):
        # Run 'ceph nfs <cmd>' without raising on non-zero exit status;
        # stdout/stderr are captured so callers can inspect them on the
        # returned proc object.
        return self.mgr_cluster.mon_manager.run_cluster_cmd(args=f"nfs {cmd}",
                                                            stdout=StringIO(),
                                                            stderr=StringIO(),
                                                            check_status=False)

    def _orch_cmd(self, *args):
        # Convenience wrapper for 'ceph orch ...' commands.
        return self._cmd("orch", *args)

    def _sys_cmd(self, cmd):
        # Run a shell command on the test node; return its stdout as bytes,
        # or None when the command produced no output (check_status=False,
        # so failures are not raised here).
        ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO())
        stdout = ret[0].stdout
        if stdout:
            return stdout.getvalue()
38 | ||
    def setUp(self):
        super(TestNFS, self).setUp()
        self._load_module('nfs')
        # Fixed identifiers shared by all tests in this class.
        self.cluster_id = "test"
        self.export_type = "cephfs"
        self.pseudo_path = "/cephfs"
        self.path = "/"
        self.fs_name = "nfs-cephfs"
        # Orchestrator service name for the cluster: nfs.<cluster_id>
        self.expected_name = "nfs.test"
        # Expected JSON body of the default export; several tests compare
        # 'nfs export ls --detailed' / 'nfs export info' output against it.
        self.sample_export = {
            "export_id": 1,
            "path": self.path,
            "cluster_id": self.cluster_id,
            "pseudo": self.pseudo_path,
            "access_type": "RW",
            "squash": "none",
            "security_label": True,
            "protocols": [
                4
            ],
            "transports": [
                "TCP"
            ],
            "fsal": {
                "name": "CEPH",
                "user_id": "nfs.test.1",
                "fs_name": self.fs_name,
            },
            "clients": []
        }
69 | ||
    def _check_nfs_server_status(self):
        # If the node's kernel/user-space nfs-server service is active it
        # would conflict with ganesha, so disable it first.
        res = self._sys_cmd(['sudo', 'systemctl', 'status', 'nfs-server'])
        if isinstance(res, bytes) and b'Active: active' in res:
            self._disable_nfs()

    def _disable_nfs(self):
        # Stop and disable the distro nfs-server service in one shot.
        log.info("Disabling NFS")
        self._sys_cmd(['sudo', 'systemctl', 'disable', 'nfs-server', '--now'])
f6b5b4d7 | 78 | |
33c7a0ef TL |
79 | def _fetch_nfs_daemons_details(self, enable_json=False): |
80 | args = ('ps', f'--service_name={self.expected_name}') | |
81 | if enable_json: | |
82 | args = (*args, '--format=json') | |
83 | return self._orch_cmd(*args) | |
84 | ||
    def _check_nfs_cluster_event(self, expected_event):
        '''
        Check whether an event occurred during the lifetime of the NFS service
        :param expected_event: event that was expected to occur
        :returns: True if the event was found, False if retries were exhausted
        '''
        event_occurred = False
        # Wait few seconds for NFS daemons' status to be updated
        with contextutil.safe_while(sleep=10, tries=18, _raise=False) as proceed:
            while not event_occurred and proceed():
                daemons_details = json.loads(
                    self._fetch_nfs_daemons_details(enable_json=True))
                log.info('daemons details %s', daemons_details)
                # 'events' key may not exist in the daemon description
                # after a mgr fail over and could take some time to appear
                # (it's populated on first daemon event)
                if 'events' not in daemons_details[0]:
                    continue
                for event in daemons_details[0]['events']:
                    log.info('daemon event %s', event)
                    if expected_event in event:
                        event_occurred = True
                        break
        return event_occurred
    def _check_nfs_cluster_status(self, expected_status, fail_msg):
        '''
        Check the current status of the NFS service
        :param expected_status: Status to be verified (substring match against
                                the plain-text 'orch ps' output)
        :param fail_msg: Message to be printed if test failed
        '''
        # Wait for a minute as ganesha daemon takes some time to be
        # deleted/created
        with contextutil.safe_while(sleep=6, tries=10, _raise=False) as proceed:
            while proceed():
                if expected_status in self._fetch_nfs_daemons_details():
                    return
        # _raise=False: safe_while exhausting retries falls through here
        self.fail(fail_msg)
f6b5b4d7 TL |
122 | |
123 | def _check_auth_ls(self, export_id=1, check_in=False): | |
124 | ''' | |
125 | Tests export user id creation or deletion. | |
126 | :param export_id: Denotes export number | |
127 | :param check_in: Check specified export id | |
128 | ''' | |
129 | output = self._cmd('auth', 'ls') | |
a4b75251 | 130 | client_id = f'client.nfs.{self.cluster_id}' |
f6b5b4d7 | 131 | if check_in: |
a4b75251 | 132 | self.assertIn(f'{client_id}.{export_id}', output) |
f6b5b4d7 | 133 | else: |
a4b75251 | 134 | self.assertNotIn(f'{client_id}.{export_id}', output) |
    def _test_idempotency(self, cmd_func, cmd_args):
        '''
        Test idempotency of commands. It first runs the TestNFS test method
        for a command and then checks the result of command run again. TestNFS
        test method has required checks to verify that command works.
        :param cmd_func: TestNFS method
        :param cmd_args: nfs command arguments to be run
        '''
        cmd_func()
        # Re-running the same command must succeed (exit 0) if idempotent.
        ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
        if ret != 0:
            self.fail("Idempotency test failed")
148 | ||
    def _test_create_cluster(self):
        '''
        Test single nfs cluster deployment.
        Retries the whole create-and-verify sequence, cleaning up a
        leftover cluster from a previous attempt if one exists.
        '''
        with contextutil.safe_while(sleep=4, tries=10) as proceed:
            while proceed():
                try:
                    # Disable any running nfs ganesha daemon
                    self._check_nfs_server_status()
                    cluster_create = self._nfs_complete_cmd(
                        f'cluster create {self.cluster_id}')
                    if cluster_create.stderr and 'cluster already exists' \
                            in cluster_create.stderr.getvalue():
                        self._test_delete_cluster()
                        continue
                    # Check for expected status and daemon name
                    # (nfs.<cluster_id>)
                    self._check_nfs_cluster_status(
                        'running', 'NFS Ganesha cluster deployment failed')
                    break
                except (AssertionError, CommandFailedError) as e:
                    # AssertionError comes from the status check; retry both.
                    log.warning(f'{e}, retrying')
f6b5b4d7 TL |
171 | |
    def _test_delete_cluster(self):
        '''
        Test deletion of a single nfs cluster.
        '''
        self._nfs_cmd('cluster', 'rm', self.cluster_id)
        # 'orch ps' reports 'No daemons reported' once all daemons are gone.
        self._check_nfs_cluster_status('No daemons reported',
                                       'NFS Ganesha cluster could not be deleted')
f6b5b4d7 TL |
179 | |
180 | def _test_list_cluster(self, empty=False): | |
181 | ''' | |
182 | Test listing of deployed nfs clusters. If nfs cluster is deployed then | |
183 | it checks for expected cluster id. Otherwise checks nothing is listed. | |
184 | :param empty: If true it denotes no cluster is deployed. | |
185 | ''' | |
1e59de90 TL |
186 | nfs_output = self._nfs_cmd('cluster', 'ls') |
187 | jdata = json.loads(nfs_output) | |
f6b5b4d7 | 188 | if empty: |
1e59de90 | 189 | self.assertEqual(len(jdata), 0) |
f6b5b4d7 TL |
190 | else: |
191 | cluster_id = self.cluster_id | |
1e59de90 | 192 | self.assertEqual([cluster_id], jdata) |
f6b5b4d7 TL |
193 | |
    def _create_export(self, export_id, create_fs=False, extra_cmd=None):
        '''
        Test creation of a single export.
        :param export_id: Denotes export number
        :param create_fs: If false filesystem exists. Otherwise create it.
        :param extra_cmd: List of extra arguments for creating export.
        '''
        if create_fs:
            self._cmd('fs', 'volume', 'create', self.fs_name)
            # Wait until the fs's MDS daemon is actually running before
            # creating an export against it.
            with contextutil.safe_while(sleep=5, tries=30) as proceed:
                while proceed():
                    output = self._cmd(
                        'orch', 'ls', '-f', 'json',
                        '--service-name', f'mds.{self.fs_name}'
                    )
                    j = json.loads(output)
                    if j[0]['status']['running']:
                        break
        export_cmd = ['nfs', 'export', 'create', 'cephfs',
                      '--fsname', self.fs_name, '--cluster-id', self.cluster_id]
        # Default to the class pseudo path unless the caller supplied args.
        if isinstance(extra_cmd, list):
            export_cmd.extend(extra_cmd)
        else:
            export_cmd.extend(['--pseudo-path', self.pseudo_path])
        # Runs the nfs export create command
        self._cmd(*export_cmd)
        # Check if user id for export is created
        self._check_auth_ls(export_id, check_in=True)
        res = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'get',
                             f'export-{export_id}', '-'])
        # Check if export object is created
        if res == b'':
            self.fail("Export cannot be created")
227 | ||
    def _create_default_export(self):
        '''
        Deploy a single nfs cluster and create export with default options.
        '''
        self._test_create_cluster()
        self._create_export(export_id='1', create_fs=True)

    def _delete_export(self):
        '''
        Delete an export.
        '''
        self._nfs_cmd('export', 'rm', self.cluster_id, self.pseudo_path)
        # The export's cephx user must be removed along with the export.
        self._check_auth_ls()

    def _test_list_export(self):
        '''
        Test listing of created exports.
        '''
        nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id))
        self.assertIn(self.pseudo_path, nfs_output)
248 | ||
    def _test_list_detailed(self, sub_vol_path):
        '''
        Test listing of created exports with detailed option.
        :param sub_vol_path: Denotes path of subvolume
        '''
        nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id, '--detailed'))
        # Export-1 with default values (access type = rw and path = '\')
        self.assertDictEqual(self.sample_export, nfs_output[0])
        # NOTE: self.sample_export is mutated in place below to describe
        # each subsequent export; fine here since each test gets a fresh
        # instance via setUp.
        # Export-2 with r only
        self.sample_export['export_id'] = 2
        self.sample_export['pseudo'] = self.pseudo_path + '1'
        self.sample_export['access_type'] = 'RO'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.2'
        self.assertDictEqual(self.sample_export, nfs_output[1])
        # Export-3 for subvolume with r only
        self.sample_export['export_id'] = 3
        self.sample_export['path'] = sub_vol_path
        self.sample_export['pseudo'] = self.pseudo_path + '2'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.3'
        self.assertDictEqual(self.sample_export, nfs_output[2])
        # Export-4 for subvolume
        self.sample_export['export_id'] = 4
        self.sample_export['pseudo'] = self.pseudo_path + '3'
        self.sample_export['access_type'] = 'RW'
        self.sample_export['fsal']['user_id'] = f'{self.expected_name}.4'
        self.assertDictEqual(self.sample_export, nfs_output[3])
275 | ||
    def _get_export(self):
        '''
        Returns export block in json format
        '''
        return json.loads(self._nfs_cmd('export', 'info', self.cluster_id, self.pseudo_path))

    def _test_get_export(self):
        '''
        Test fetching of created export.
        '''
        nfs_output = self._get_export()
        self.assertDictEqual(self.sample_export, nfs_output)
288 | ||
    def _check_export_obj_deleted(self, conf_obj=False):
        '''
        Test if export or config object are deleted successfully.
        :param conf_obj: It denotes config object needs to be checked
        '''
        rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls'])

        # NOTE(review): _sys_cmd returns None when there is no output; the
        # 'in' test below assumes at least some listing output — confirm.
        if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls):
            self.fail("Delete export failed")
298 | ||
f91f0fd5 TL |
299 | def _get_port_ip_info(self): |
300 | ''' | |
301 | Return port and ip for a cluster | |
302 | ''' | |
1e59de90 TL |
303 | #{'test': {'backend': [{'hostname': 'smithi068', 'ip': '172.21.15.68', |
304 | #'port': 2049}]}} | |
305 | with contextutil.safe_while(sleep=5, tries=6) as proceed: | |
306 | while proceed(): | |
307 | try: | |
308 | info_output = json.loads( | |
309 | self._nfs_cmd('cluster', 'info', | |
310 | self.cluster_id))['test']['backend'][0] | |
311 | return info_output["port"], info_output["ip"] | |
312 | except (IndexError, CommandFailedError) as e: | |
313 | if 'list index out of range' in str(e): | |
314 | log.warning('no port and/or ip found, retrying') | |
315 | else: | |
316 | log.warning(f'{e}, retrying') | |
f91f0fd5 TL |
317 | |
    def _test_mnt(self, pseudo_path, port, ip, check=True):
        '''
        Test mounting of created exports
        :param pseudo_path: It is the pseudo root name
        :param port: Port of deployed nfs cluster
        :param ip: IP of deployed nfs cluster
        :param check: It denotes if i/o testing needs to be done
        '''
        # Retry the mount a few times: the ganesha daemon may still be
        # starting up when this is first called.
        tries = 3
        while True:
            try:
                self.ctx.cluster.run(
                    args=['sudo', 'mount', '-t', 'nfs', '-o', f'port={port}',
                          f'{ip}:{pseudo_path}', '/mnt'])
                break
            except CommandFailedError as e:
                if tries:
                    tries -= 1
                    time.sleep(2)
                    continue
                # Check if mount failed only when non existing pseudo path is passed
                # (mount(8) exits 32 on mount failure)
                if not check and e.exitstatus == 32:
                    return
                raise

        self.ctx.cluster.run(args=['sudo', 'chmod', '1777', '/mnt'])

        try:
            # Basic i/o check: create a file and list it back.
            self.ctx.cluster.run(args=['touch', '/mnt/test'])
            out_mnt = self._sys_cmd(['ls', '/mnt'])
            self.assertEqual(out_mnt, b'test\n')
        finally:
            self.ctx.cluster.run(args=['sudo', 'umount', '/mnt'])
351 | ||
    def _write_to_read_only_export(self, pseudo_path, port, ip):
        '''
        Check if write to read only export fails
        '''
        try:
            # _test_mnt's i/o check (touch) should be rejected by the
            # read-only export with EPERM.
            self._test_mnt(pseudo_path, port, ip)
        except CommandFailedError as e:
            # Write to cephfs export should fail for test to pass
            self.assertEqual(
                e.exitstatus, errno.EPERM,
                'invalid error code on trying to write to read-only export')
        else:
            self.fail('expected write to a read-only export to fail')
f67539c2 | 365 | |
    def _create_cluster_with_fs(self, fs_name, mnt_pt=None):
        """
        create a cluster along with fs and mount it to the path supplied
        :param fs_name: name of CephFS volume to be created
        :param mnt_pt: mount fs to the path
        """
        self._test_create_cluster()
        self._cmd('fs', 'volume', 'create', fs_name)
        # Wait until the fs's MDS daemon is running before mounting.
        with contextutil.safe_while(sleep=5, tries=30) as proceed:
            while proceed():
                output = self._cmd(
                    'orch', 'ls', '-f', 'json',
                    '--service-name', f'mds.{fs_name}'
                )
                j = json.loads(output)
                if j[0]['status']['running']:
                    break
        if mnt_pt:
            # ceph-fuse can fail transiently right after volume creation.
            with contextutil.safe_while(sleep=3, tries=3) as proceed:
                while proceed():
                    try:
                        self.ctx.cluster.run(args=['sudo', 'ceph-fuse', mnt_pt])
                        break
                    except CommandFailedError as e:
                        log.warning(f'{e}, retrying')
            self.ctx.cluster.run(args=['sudo', 'chmod', '1777', mnt_pt])
392 | ||
    def _delete_cluster_with_fs(self, fs_name, mnt_pt=None, mode=None):
        """
        delete cluster along with fs and unmount it from the path supplied
        :param fs_name: name of CephFS volume to be deleted
        :param mnt_pt: unmount fs from the path
        :param mode: revert to this mode (bytes output from e.g. 'stat' is
                     decoded and stripped before use)
        """
        if mnt_pt:
            self.ctx.cluster.run(args=['sudo', 'umount', mnt_pt])
            if mode:
                if isinstance(mode, bytes):
                    mode = mode.decode().strip()
                self.ctx.cluster.run(args=['sudo', 'chmod', mode, mnt_pt])
        self._cmd('fs', 'volume', 'rm', fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()
408 | ||
    def test_create_and_delete_cluster(self):
        '''
        Test successful creation and deletion of the nfs cluster.
        '''
        self._test_create_cluster()
        self._test_list_cluster()
        self._test_delete_cluster()
        # List clusters again to ensure no cluster is shown
        self._test_list_cluster(empty=True)

    def test_create_delete_cluster_idempotency(self):
        '''
        Test idempotency of cluster create and delete commands.
        '''
        self._test_idempotency(self._test_create_cluster, ['nfs', 'cluster', 'create', self.cluster_id])
        self._test_idempotency(self._test_delete_cluster, ['nfs', 'cluster', 'rm', self.cluster_id])
f6b5b4d7 TL |
425 | |
    def test_create_cluster_with_invalid_cluster_id(self):
        '''
        Test nfs cluster deployment failure with invalid cluster id.
        '''
        try:
            invalid_cluster_id = '/cluster_test'  # Only [A-Za-z0-9-_.] chars are valid
            self._nfs_cmd('cluster', 'create', invalid_cluster_id)
            self.fail(f"Cluster successfully created with invalid cluster id {invalid_cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.EINVAL:
                raise
438 | ||
    def test_create_and_delete_export(self):
        '''
        Test successful creation and deletion of the cephfs export.
        '''
        self._create_default_export()
        self._test_get_export()
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        # Check if rados export object is deleted
        self._check_export_obj_deleted()
        # check=False: mounting the deleted export is expected to fail
        self._test_mnt(self.pseudo_path, port, ip, False)
        self._test_delete_cluster()

    def test_create_delete_export_idempotency(self):
        '''
        Test idempotency of export create and delete commands.
        '''
        self._test_idempotency(self._create_default_export, [
            'nfs', 'export', 'create', 'cephfs',
            '--fsname', self.fs_name, '--cluster-id', self.cluster_id,
            '--pseudo-path', self.pseudo_path])
        self._test_idempotency(self._delete_export, ['nfs', 'export', 'rm', self.cluster_id,
                                                     self.pseudo_path])
        self._test_delete_cluster()
464 | ||
    def test_create_multiple_exports(self):
        '''
        Test creating multiple exports with different access type and path.
        '''
        # Export-1 with default values (access type = rw and path = '\')
        self._create_default_export()
        # Export-2 with r only
        self._create_export(export_id='2',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'1', '--readonly'])
        # Export-3 for subvolume with r only
        self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol')
        fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol').strip()
        self._create_export(export_id='3',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'2', '--readonly',
                                       '--path', fs_path])
        # Export-4 for subvolume
        self._create_export(export_id='4',
                            extra_cmd=['--pseudo-path', self.pseudo_path+'3',
                                       '--path', fs_path])
        # Check if exports gets listed
        self._test_list_detailed(fs_path)
        self._test_delete_cluster()
        # Check if rados ganesha conf object is deleted
        self._check_export_obj_deleted(conf_obj=True)
        # All export cephx users must be gone after cluster removal.
        self._check_auth_ls()
490 | ||
    def test_exports_on_mgr_restart(self):
        '''
        Test export availability on restarting mgr.
        '''
        self._create_default_export()
        # unload and load module will restart the mgr
        self._unload_module("cephadm")
        self._load_module("cephadm")
        self._orch_cmd("set", "backend", "cephadm")
        # Check if ganesha daemon is running
        self._check_nfs_cluster_status('running', 'Failed to redeploy NFS Ganesha cluster')
        # Checks if created export is listed
        self._test_list_export()
        port, ip = self._get_port_ip_info()
        # Export must still be mountable after the mgr restart.
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        self._test_delete_cluster()
508 | ||
    def test_export_create_with_non_existing_fsname(self):
        '''
        Test creating export with non-existing filesystem.
        '''
        try:
            fs_name = 'nfs-test'
            self._test_create_cluster()
            self._nfs_cmd('export', 'create', 'cephfs',
                          '--fsname', fs_name, '--cluster-id', self.cluster_id,
                          '--pseudo-path', self.pseudo_path)
            self.fail(f"Export created with non-existing filesystem {fs_name}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise
        finally:
            # Clean up the cluster whether or not the export failed.
            self._test_delete_cluster()

    def test_export_create_with_non_existing_clusterid(self):
        '''
        Test creating cephfs export with non-existing nfs cluster.
        '''
        try:
            cluster_id = 'invalidtest'
            self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name,
                          '--cluster-id', cluster_id, '--pseudo-path', self.pseudo_path)
            self.fail(f"Export created with non-existing cluster id {cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise
540 | ||
    def test_export_create_with_relative_pseudo_path_and_root_directory(self):
        '''
        Test creating cephfs export with relative or '/' pseudo path.
        '''
        def check_pseudo_path(pseudo_path):
            # Export create must be rejected with EINVAL for this path.
            try:
                self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name,
                              '--cluster-id', self.cluster_id,
                              '--pseudo-path', pseudo_path)
                self.fail(f"Export created for {pseudo_path}")
            except CommandFailedError as e:
                # Command should fail for test to pass
                if e.exitstatus != errno.EINVAL:
                    raise

        self._test_create_cluster()
        self._cmd('fs', 'volume', 'create', self.fs_name)
        check_pseudo_path('invalidpath')
        check_pseudo_path('/')
        check_pseudo_path('//')
        self._cmd('fs', 'volume', 'rm', self.fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()
563 | ||
    def test_write_to_read_only_export(self):
        '''
        Test write to readonly export.
        '''
        self._test_create_cluster()
        self._create_export(export_id='1', create_fs=True,
                            extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly'])
        port, ip = self._get_port_ip_info()
        # Make sure the daemon is up before attempting the mount.
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed')
        self._write_to_read_only_export(self.pseudo_path, port, ip)
        self._test_delete_cluster()
575 | ||
    def test_cluster_info(self):
        '''
        Test cluster info outputs correct ip and hostname
        '''
        self._test_create_cluster()
        info_output = json.loads(self._nfs_cmd('cluster', 'info', self.cluster_id))
        print(f'info {info_output}')
        # Pop the ip so the remaining dict can be compared field-by-field;
        # the ip itself is validated separately below.
        info_ip = info_output[self.cluster_id].get('backend', [])[0].pop("ip")
        host_details = {
            self.cluster_id: {
                'backend': [
                    {
                        "hostname": self._sys_cmd(['hostname']).decode("utf-8").strip(),
                        "port": 2049
                    }
                ],
                "virtual_ip": None,
            }
        }
        host_ip = self._sys_cmd(['hostname', '-I']).decode("utf-8").split()
        print(f'host_ip is {host_ip}, info_ip is {info_ip}')
        self.assertDictEqual(info_output, host_details)
        # Reported ip must be one of the host's addresses.
        self.assertTrue(info_ip in host_ip)
        self._test_delete_cluster()
600 | ||
    def test_cluster_set_reset_user_config(self):
        '''
        Test cluster is created using user config and reverts back to default
        config on reset.
        '''
        self._test_create_cluster()

        pool = NFS_POOL_NAME
        user_id = 'test'
        fs_name = 'user_test_fs'
        pseudo_path = '/ceph'
        self._cmd('fs', 'volume', 'create', fs_name)
        time.sleep(20)
        key = self._cmd('auth', 'get-or-create-key', f'client.{user_id}', 'mon',
                        'allow r', 'osd',
                        f'allow rw pool={pool} namespace={self.cluster_id}, allow rw tag cephfs data={fs_name}',
                        'mds', f'allow rw path={self.path}').strip()
        # Hand-written ganesha config; round-tripped below against the
        # stored rados object, so only self-consistency matters.
        config = f""" LOG {{
        Default_log_level = FULL_DEBUG;
        }}

        EXPORT {{
	        Export_Id = 100;
	        Transports = TCP;
	        Path = /;
	        Pseudo = {pseudo_path};
	        Protocols = 4;
	        Access_Type = RW;
	        Attr_Expiration_Time = 0;
	        Squash = None;
	        FSAL {{
	              Name = CEPH;
	              Filesystem = {fs_name};
	              User_Id = {user_id};
	              Secret_Access_Key = '{key}';
	        }}
        }}"""
        port, ip = self._get_port_ip_info()
        self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster', 'config',
                                   'set', self.cluster_id, '-i', '-'], stdin=config)
        time.sleep(30)
        res = self._sys_cmd(['rados', '-p', pool, '-N', self.cluster_id, 'get',
                             f'userconf-nfs.{user_id}', '-'])
        self.assertEqual(config, res.decode('utf-8'))
        self._test_mnt(pseudo_path, port, ip)
        self._nfs_cmd('cluster', 'config', 'reset', self.cluster_id)
        rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls'])
        # NOTE(review): with 'and' this only fails when the default conf is
        # missing AND the user conf survived; catching either problem alone
        # would need 'or' — confirm intended semantics before changing.
        if b'conf-nfs' not in rados_obj_ls and b'userconf-nfs' in rados_obj_ls:
            self.fail("User config not deleted")
        time.sleep(30)
        # The user-config export (/ceph) must no longer be mountable.
        self._test_mnt(pseudo_path, port, ip, False)
        self._cmd('fs', 'volume', 'rm', fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()
654 | |
655 | def test_cluster_set_user_config_with_non_existing_clusterid(self): | |
656 | ''' | |
657 | Test setting user config for non-existing nfs cluster. | |
658 | ''' | |
1e59de90 TL |
659 | cluster_id = 'invalidtest' |
660 | with contextutil.safe_while(sleep=3, tries=3) as proceed: | |
661 | while proceed(): | |
662 | try: | |
663 | self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster', | |
664 | 'config', 'set', cluster_id, | |
665 | '-i', '-'], stdin='testing') | |
666 | self.fail(f"User config set for non-existing cluster" | |
667 | f"{cluster_id}") | |
668 | except CommandFailedError as e: | |
669 | # Command should fail for test to pass | |
670 | if e.exitstatus == errno.ENOENT: | |
671 | break | |
672 | log.warning('exitstatus != ENOENT, retrying') | |
f6b5b4d7 TL |
673 | |
    def test_cluster_reset_user_config_with_non_existing_clusterid(self):
        '''
        Test resetting user config for non-existing nfs cluster.
        '''
        try:
            cluster_id = 'invalidtest'
            self._nfs_cmd('cluster', 'config', 'reset', cluster_id)
            self.fail(f"User config reset for non-existing cluster {cluster_id}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise
f67539c2 | 686 | |
a4b75251 TL |
    def test_create_export_via_apply(self):
        '''
        Test creation of export via apply
        '''
        self._test_create_cluster()
        # 'export apply' takes a JSON export spec on stdin.
        self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                   self.cluster_id, '-i', '-'],
                             stdin=json.dumps({
                                 "path": "/",
                                 "pseudo": "/cephfs",
                                 "squash": "none",
                                 "access_type": "rw",
                                 "protocols": [4],
                                 "fsal": {
                                     "name": "CEPH",
                                     "fs_name": self.fs_name
                                 }
                             }))
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        self._check_nfs_cluster_status(
            'running', 'NFS Ganesha cluster not running after new export was applied')
        self._test_delete_cluster()
f67539c2 TL |
    def test_update_export(self):
        '''
        Test update of export's pseudo path and access type from rw to ro
        '''
        self._create_default_export()
        port, ip = self._get_port_ip_info()
        self._test_mnt(self.pseudo_path, port, ip)
        export_block = self._get_export()
        new_pseudo_path = '/testing'
        export_block['pseudo'] = new_pseudo_path
        export_block['access_type'] = 'RO'
        self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply',
                                   self.cluster_id, '-i', '-'],
                             stdin=json.dumps(export_block))
        # Changing the pseudo path must restart the ganesha service.
        if not self._check_nfs_cluster_event('restart'):
            self.fail("updating export's pseudo path should trigger restart of NFS service")
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster not running after restart')
        self._write_to_read_only_export(new_pseudo_path, port, ip)
        self._test_delete_cluster()
730 | ||
33c7a0ef TL |
    def test_update_export_ro_to_rw(self):
        '''
        Test update of export's access level from ro to rw
        '''
        self._test_create_cluster()
        self._create_export(
            export_id='1', create_fs=True,
            extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly'])
        port, ip = self._get_port_ip_info()
        self._write_to_read_only_export(self.pseudo_path, port, ip)
        export_block = self._get_export()
        export_block['access_type'] = 'RW'
        self.ctx.cluster.run(
            args=['ceph', 'nfs', 'export', 'apply', self.cluster_id, '-i', '-'],
            stdin=json.dumps(export_block))
        # An access-type-only change must NOT restart the service.
        if self._check_nfs_cluster_event('restart'):
            self.fail("update of export's access type should not trigger NFS service restart")
        self._test_mnt(self.pseudo_path, port, ip)
        self._test_delete_cluster()
750 | ||
f67539c2 TL |
751 | def test_update_export_with_invalid_values(self): |
752 | ''' | |
753 | Test update of export with invalid values | |
754 | ''' | |
755 | self._create_default_export() | |
756 | export_block = self._get_export() | |
757 | ||
758 | def update_with_invalid_values(key, value, fsal=False): | |
759 | export_block_new = dict(export_block) | |
760 | if fsal: | |
761 | export_block_new['fsal'] = dict(export_block['fsal']) | |
762 | export_block_new['fsal'][key] = value | |
763 | else: | |
764 | export_block_new[key] = value | |
765 | try: | |
a4b75251 TL |
766 | self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply', |
767 | self.cluster_id, '-i', '-'], | |
f67539c2 TL |
768 | stdin=json.dumps(export_block_new)) |
769 | except CommandFailedError: | |
770 | pass | |
771 | ||
772 | update_with_invalid_values('export_id', 9) | |
773 | update_with_invalid_values('cluster_id', 'testing_new') | |
774 | update_with_invalid_values('pseudo', 'test_relpath') | |
775 | update_with_invalid_values('access_type', 'W') | |
776 | update_with_invalid_values('squash', 'no_squash') | |
777 | update_with_invalid_values('security_label', 'invalid') | |
778 | update_with_invalid_values('protocols', [2]) | |
779 | update_with_invalid_values('transports', ['UD']) | |
780 | update_with_invalid_values('name', 'RGW', True) | |
781 | update_with_invalid_values('user_id', 'testing_export', True) | |
782 | update_with_invalid_values('fs_name', 'b', True) | |
783 | self._test_delete_cluster() | |
b3b6e05e TL |
784 | |
785 | def test_cmds_without_reqd_args(self): | |
786 | ''' | |
787 | Test that cmd fails on not passing required arguments | |
788 | ''' | |
789 | def exec_cmd_invalid(*cmd): | |
790 | try: | |
791 | self._nfs_cmd(*cmd) | |
792 | self.fail(f"nfs {cmd} command executed successfully without required arguments") | |
793 | except CommandFailedError as e: | |
794 | # Command should fail for test to pass | |
795 | if e.exitstatus != errno.EINVAL: | |
796 | raise | |
797 | ||
798 | exec_cmd_invalid('cluster', 'create') | |
799 | exec_cmd_invalid('cluster', 'delete') | |
800 | exec_cmd_invalid('cluster', 'config', 'set') | |
801 | exec_cmd_invalid('cluster', 'config', 'reset') | |
802 | exec_cmd_invalid('export', 'create', 'cephfs') | |
a4b75251 TL |
803 | exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid') |
804 | exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid', 'a_fs') | |
b3b6e05e TL |
805 | exec_cmd_invalid('export', 'ls') |
806 | exec_cmd_invalid('export', 'delete') | |
807 | exec_cmd_invalid('export', 'delete', 'clusterid') | |
a4b75251 TL |
808 | exec_cmd_invalid('export', 'info') |
809 | exec_cmd_invalid('export', 'info', 'clusterid') | |
810 | exec_cmd_invalid('export', 'apply') | |
39ae355f TL |
811 | |
812 | def test_non_existent_cluster(self): | |
813 | """ | |
814 | Test that cluster info doesn't throw junk data for non-existent cluster | |
815 | """ | |
1e59de90 TL |
816 | cluster_ls = self._nfs_cmd('cluster', 'ls') |
817 | self.assertNotIn('foo', cluster_ls, 'cluster foo exists') | |
818 | try: | |
819 | self._nfs_cmd('cluster', 'info', 'foo') | |
820 | self.fail("nfs cluster info foo returned successfully for non-existent cluster") | |
821 | except CommandFailedError as e: | |
822 | if e.exitstatus != errno.ENOENT: | |
823 | raise | |
824 | ||
825 | def test_nfs_export_with_invalid_path(self): | |
826 | """ | |
827 | Test that nfs exports can't be created with invalid path | |
828 | """ | |
829 | mnt_pt = '/mnt' | |
830 | preserve_mode = self._sys_cmd(['stat', '-c', '%a', mnt_pt]) | |
831 | self._create_cluster_with_fs(self.fs_name, mnt_pt) | |
832 | try: | |
833 | self._create_export(export_id='123', | |
834 | extra_cmd=['--pseudo-path', self.pseudo_path, | |
835 | '--path', '/non_existent_dir']) | |
836 | except CommandFailedError as e: | |
837 | if e.exitstatus != errno.ENOENT: | |
838 | raise | |
839 | self._delete_cluster_with_fs(self.fs_name, mnt_pt, preserve_mode) | |
840 | ||
841 | def test_nfs_export_creation_at_filepath(self): | |
842 | """ | |
843 | Test that nfs exports can't be created at a filepath | |
844 | """ | |
845 | mnt_pt = '/mnt' | |
846 | preserve_mode = self._sys_cmd(['stat', '-c', '%a', mnt_pt]) | |
847 | self._create_cluster_with_fs(self.fs_name, mnt_pt) | |
848 | self.ctx.cluster.run(args=['touch', f'{mnt_pt}/testfile']) | |
849 | try: | |
850 | self._create_export(export_id='123', extra_cmd=['--pseudo-path', | |
851 | self.pseudo_path, | |
852 | '--path', | |
853 | '/testfile']) | |
854 | except CommandFailedError as e: | |
855 | if e.exitstatus != errno.ENOTDIR: | |
856 | raise | |
857 | self.ctx.cluster.run(args=['rm', '-rf', '/mnt/testfile']) | |
858 | self._delete_cluster_with_fs(self.fs_name, mnt_pt, preserve_mode) | |
859 | ||
860 | def test_nfs_export_creation_at_symlink(self): | |
861 | """ | |
862 | Test that nfs exports can't be created at a symlink path | |
863 | """ | |
864 | mnt_pt = '/mnt' | |
865 | preserve_mode = self._sys_cmd(['stat', '-c', '%a', mnt_pt]) | |
866 | self._create_cluster_with_fs(self.fs_name, mnt_pt) | |
867 | self.ctx.cluster.run(args=['mkdir', f'{mnt_pt}/testdir']) | |
868 | self.ctx.cluster.run(args=['ln', '-s', f'{mnt_pt}/testdir', | |
869 | f'{mnt_pt}/testdir_symlink']) | |
870 | try: | |
871 | self._create_export(export_id='123', | |
872 | extra_cmd=['--pseudo-path', | |
873 | self.pseudo_path, | |
874 | '--path', | |
875 | '/testdir_symlink']) | |
876 | except CommandFailedError as e: | |
877 | if e.exitstatus != errno.ENOTDIR: | |
878 | raise | |
879 | self.ctx.cluster.run(args=['rm', '-rf', f'{mnt_pt}/*']) | |
880 | self._delete_cluster_with_fs(self.fs_name, mnt_pt, preserve_mode) |